# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `one_hot_categorical.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import one_hot_categorical
from distrax._src.utils import equivalence
from distrax._src.utils import math
import jax
import jax.numpy as jnp
import numpy as np
import scipy
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
RTOL = 2e-3
class OneHotCategoricalTest(
equivalence.EquivalenceTest, parameterized.TestCase):
def setUp(self):
# pylint: disable=too-many-function-args
super().setUp(one_hot_categorical.OneHotCategorical)
self.p = np.asarray([0.1, 0.4, 0.2, 0.3])
self.logits = np.log(self.p) - 1.0  # deliberately unnormalized logits
self.assertion_fn = lambda x, y: np.testing.assert_allclose(x, y, rtol=RTOL)
def test_parameters_from_probs(self):
dist = self.distrax_cls(probs=self.p)
self.assertion_fn(dist.logits, math.normalize(logits=np.log(self.p)))
self.assertion_fn(dist.probs, math.normalize(probs=self.p))
def test_parameters_from_logits(self):
dist = self.distrax_cls(logits=self.logits)
self.assertion_fn(dist.logits, math.normalize(logits=self.logits))
self.assertion_fn(dist.probs, math.normalize(probs=self.p))
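# Note (assumed semantics, not part of this test file): math.normalize is
# expected to rescale probs to sum to one and to shift logits by their
# logsumexp, i.e. roughly
#   normalize(probs=p)  ~ p / p.sum(axis=-1, keepdims=True)
#   normalize(logits=l) ~ l - logsumexp(l, axis=-1, keepdims=True)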
@parameterized.named_parameters(
('from probs', False),
('from logits', True))
def test_num_categories(self, from_logits):
dist_params = {'logits': self.logits} if from_logits else {'probs': self.p}
dist = self.distrax_cls(**dist_params)
np.testing.assert_equal(dist.num_categories, len(self.p))
@parameterized.named_parameters(
('1d logits', {'logits': [0.0, 1.0, -0.5]}),
('1d probs', {'probs': [0.2, 0.5, 0.3]}),
('2d logits', {'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]}),
('2d probs', {'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]}),
)
def test_event_shape(self, distr_params):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_event_shape((), distr_params)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.2, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.2, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, ()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, ()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, 4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, 4),
('2d logits, 1-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_shape(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.2, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.2, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, ()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, ()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, 4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, 4),
('2d logits, 1-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn)
@chex.all_variants
@parameterized.named_parameters(
('int32', jnp.int32),
('int64', jnp.int64),
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist_params = {'logits': self.logits, 'dtype': dtype}
dist = self.distrax_cls(**dist_params)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('from probs', False),
('from logits', True))
def test_sample_unique_values(self, from_logits):
dist_params = {'logits': self.logits} if from_logits else {'probs': self.p}
dist = self.distrax_cls(**dist_params)
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=100))
samples = sample_fn(self.key)
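# a one-hot sample should contain exactly the values 0 and 1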
np.testing.assert_equal(np.unique(samples), np.arange(2))
@chex.all_variants
def test_sample_extreme_probs(self):
dist_params = {'probs': np.asarray([1., 0., 0., 0.])}
dist = self.distrax_cls(**dist_params)
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=100))
samples = sample_fn(self.key)
np.testing.assert_equal(np.unique(samples[..., 0]), 1)
np.testing.assert_equal(np.unique(samples[..., 1:]), 0)
@chex.all_variants
@parameterized.named_parameters(
('log_prob; 1d logits, 1 input',
'log_prob',
{'logits': [0.0, 0.5, -0.5]},
[1, 0, 0]),
('log_prob; 1d logits, 2 inputs',
'log_prob',
{'logits': [0.0, 0.5, -0.5]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d logits, 2 inputs',
'log_prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d logits, rank-3 inputs',
'log_prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('log_prob; 1d probs, 1 input',
'log_prob',
{'probs': [0.3, 0.2, 0.5]},
[1, 0, 0]),
('log_prob; 1d probs, 2 inputs',
'log_prob',
{'probs': [0.3, 0.2, 0.5]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d probs, 2 inputs',
'log_prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d probs, rank-3 inputs',
'log_prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('log_prob; unnormalized probs',
'log_prob',
{'probs': [0.1, 0.2, 0.3]},
[[0, 0, 1], [0, 1, 0], [1, 0, 0]]),
('prob; 1d logits, 1 input',
'prob',
{'logits': [0.0, 0.5, -0.5]},
[1, 0, 0]),
('prob; 1d logits, 2 inputs',
'prob',
{'logits': [0.0, 0.5, -0.5]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d logits, 2 inputs',
'prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d logits, rank-3 inputs',
'prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('prob; 1d probs, 1 input',
'prob',
{'probs': [0.3, 0.2, 0.5]},
[1, 0, 0]),
('prob; 1d probs, 2 inputs',
'prob',
{'probs': [0.3, 0.2, 0.5]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d probs, 2 inputs',
'prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d probs, rank-3 inputs',
'prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('prob; unnormalized probs',
'prob',
{'probs': [0.1, 0.2, 0.3]},
[[0, 0, 1], [0, 1, 0], [1, 0, 0]]),
)
def test_pdf(self, function_string, distr_params, value):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
value = np.array(value)
#!/usr/bin/env python
import numpy as np
import copy
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import inspect
import matplotlib.ticker as ticker
import netCDF4
from netCDF4 import Dataset
class ncdisp:
def map(self,var,y1=None,y2=None,x1=None,x2=None,y='I',x='J',loc=[],\
m=None,p='cyl',edge='n',face='y',intp='n',cbar='y',ref=None,**kwargs):
"""
Display the geographic map.
Parameters
----------
var : variable to display;
Can be either an extracted matrix or a string giving the variable name
y, x : [ 'I' | 'J' | ... | 'i' | 'j' | ... |], default: 'I', 'J'
Dimension of the data.
If lowercase, 'y1' ('x1') and 'y2' ('x2') should be position, e.g. latitude/longitude/time
Otherwise, 'y1' ('x1') and 'y2' ('x2') should be indexes
y1, y2, x1, x2 : map boundaries, default: None
loc : [['I',1...] | ['i',1...]], default: []
Locator of the variable on other dimensions.
When the letters are uppercases, the number after it should be pixel index;
When the letters are lowercases, the number after it should be exact position (e.g. latitude/longitude/time)
m : basemap instance, default: None
If m is given, draw the map on m; otherwise, draw on a new basemap.
p (short for projection) : [ None | projection for mpl_toolkits.basemap.Basemap], default: 'cyl'
If None, a plain pcolormesh or imshow is used instead of a Basemap projection
edge : [ 'y' | 'n' ], default: 'n'
Whether (y) or not (n) to draw contour lines
face : [ 'y' | 'n' ], default: 'y'
Whether (y) or not (n) to draw the contour surface
cbar : [ 'y' | 'n' ], default: 'y'
If 'y', draw the colormap attached to the map.
ref : netCDF4.Variable object, default: None
Variable for reference of dimensions. If None, self._refvar will be used.
**kwargs :
Dict with up to four inferior dict (optional) are available:
'bm' : **bmkwargs, passed on to mpl_toolkits.basemap.Basemap
'map' : **mapkwargs, passed on to matplotlib.pyplot.contour(f) or matplotlib.pyplot.pcolormesh or matplotlib.pyplot.imshow
'cb' : **cbkwargs, passed on to matplotlib.pyplot.colorbar
Returns
-------
ax : axes
m : object of basemap or axes
im : image
cb : colorbar
Example:
nc=ncload('sample')
v=nc.get('samplevar')
ax,m,im,cb=nc.map(v,loc=['K',1,'O',1],p='nplaea',intp='y',bm={'boundinglat':20},map={'cmap':plt.get_cmap('bwr')},cb={'boundaries':range(5)})
"""
nc=self.nc
dims=self.dims
dimkeys=self.dimkeys
bmspec=inspect.getargspec(self._defaultbmargs)
bmkwargs=('bm' in kwargs and kwargs['bm']) or {}
mapspec=inspect.getargspec(self._defaultmapargs)
mapkwargs=('map' in kwargs and kwargs['map']) or {}
cbkwargs=('cb' in kwargs and kwargs['cb']) or {}
dic=dict(zip(bmspec[0],bmspec[-1]))
for key in dic.keys():
if key not in bmkwargs:bmkwargs[key]=dic[key]
dic=dict(zip(mapspec[0],mapspec[-1]))
for key in dic.keys():
if key not in mapkwargs:mapkwargs[key]=dic[key]
if loc:
locdims=[loc[i*2] for i in range(len(loc)//2)]
locposs=[loc[i*2+1] for i in range(len(loc)//2)]
index=[slice(0,k) for k in var.shape]
if isinstance(var,netCDF4.Variable):ref=var
elif ref is None:ref=self._refvar
for d in locdims:index[ref.dimensions.index(dimkeys[d.upper()])]=locposs[locdims.index(d)]
var=var[index]
coordy=dims[y.upper()]
coordx=dims[x.upper()]
if coordy[0]>coordy[-1]:
var=var[::-1]
coordy=coordy[::-1]
if y.isupper():
ytemp=y1
if y2 is not None:y1=len(coordy)-y2
else:y1=None
if ytemp is not None:y2=len(coordy)-ytemp
else:y2=None
if coordx[0]>coordx[-1]:
var=var[:,::-1]
coordx=coordx[::-1]
if x.isupper():
xtemp=x1
if x2 is not None:x1=len(coordx)-x2
else:x1=None
if xtemp is not None:x2=len(coordx)-xtemp
else:x2=None
if 'lat' in dimkeys[y.upper()].lower():
latmin=np.max([-90.,coordy[0]-(coordy[1]-coordy[0])/2.])
latmax=np.min([90.,coordy[-1]+(coordy[-1]-coordy[-2])/2.])
elif 'lon' in dimkeys[y.upper()].lower():
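# Tile the longitude axis by +/-360 degrees and keep a single copy of each
# point inside [-180, 180] so the field wraps cleanly across the dateline.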
coordy=np.hstack((coordy-360.,coordy,coordy+360.))
coordymask=(coordy>=-180.)*(coordy<=180.)
coordy=coordy[coordymask]
coordy=coordy[(coordy[0]==coordy[1] and 1) or 0:(coordy[-1]==coordy[-2] and -1) or None]
var=np.ma.vstack((var,var,var))
var=var[coordymask,:]
var=var[(coordy[0]==coordy[1] and 1) or 0:(coordy[-1]==coordy[-2] and -1) or None,:]
latmin=np.max([-180.,coordy[0]-(coordy[1]-coordy[0])/2.])
latmax=np.min([180.,coordy[-1]+(coordy[-1]-coordy[-2])/2.])
else:
latmin=latmax=np.nan
if 'lat' in dimkeys[x.upper()].lower():
lonmin=np.max([-90.,coordx[0]-(coordx[1]-coordx[0])/2.])
lonmax=np.min([90.,coordx[-1]+(coordx[-1]-coordx[-2])/2.])
elif 'lon' in dimkeys[x.upper()].lower():
coordx=np.hstack((coordx-360.,coordx,coordx+360.))
coordxmask=(coordx>=-180.)*(coordx<=180.)
coordx=coordx[coordxmask]
coordx=coordx[(coordx[0]==coordx[1] and 1) or 0:(coordx[-1]==coordx[-2] and -1) or None]
var=np.ma.hstack((var,var,var))
var=var[:,coordxmask]
var=var[:,(coordx[0]==coordx[1] and 1) or 0:(coordx[-1]==coordx[-2] and -1) or None]
lonmin=np.max([-180.,coordx[0]-(coordx[1]-coordx[0])/2.])
lonmax=np.min([180.,coordx[-1]+(coordx[-1]-coordx[-2])/2.])
else:
lonmin=lonmax=np.nan
if p in ['nplaea','ortho']:
crnryy=(coordy[:-1]+coordy[1:])/2.
crnryy=np.insert(crnryy,0,coordy[0]-(coordy[1]-coordy[0])/2.)
crnryy=np.append(crnryy,coordy[-1]+(coordy[-1]-coordy[-2])/2.)
import numpy as np
import pickle
import matplotlib.pyplot as plt
import csv
import pandas
folder = "GP/"
ktype = "lin"
lambd = [0.01,0.1,1.0]
prdf="newest"
data=pandas.read_csv(folder+ktype+'/'+prdf+'.csv', sep=', ', engine='python')
names=list(data)
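# Assumed column layout of the results CSV, inferred from the indexing below:
#   [0] lambda (regularization), [1] kernel parameter (c or sigma),
#   [2] training-set RMSE, [3] cross-validation RMSE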
plt.figure(0,figsize=(20,6))
# Enable interactive mode
plt.ion()
plt.title("Linear kernel with GP description",fontsize=24)
plt.xlabel("first coefficient (c) value",fontsize=20)
plt.ylabel("RMSE [eV/atom]",fontsize=20)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
# Draw the grid lines
plt.grid(True)
for l in lambd:
results = data.loc[data[names[0]] == l]
x=np.asarray(results[results.columns[1]])
sort = x.argsort()
y=np.asarray(results[results.columns[3]])
plt.plot(x[sort],y[sort], marker='x', linestyle='dashed', linewidth=2, markersize=8, label="using lambda = "+str(l))
plt.xscale('symlog', linthresh=20)
plt.ylim(0.1,0.5)
plt.legend(loc='upper left', fontsize=20)
plt.show()
plt.savefig('kernel_lin.png')
# Draw the grid lines
plt.figure(3)
plt.grid(True)
c=100
results = data.loc[data[names[1]] == c]
x=np.asarray(results[results.columns[0]])
sort = x.argsort()
y1=np.asarray(results[results.columns[3]])
y2=np.asarray(results[results.columns[2]])
plt.plot(x[sort],y1[sort], marker='x', linestyle='dashed', linewidth=2, markersize=8, label="Cross-validation set")
plt.plot(x[sort],y2[sort], marker='x', linestyle='dashed', linewidth=2, markersize=8, label="Training set")
plt.legend(loc='lower right')
plt.title("Effect of Regularization", fontsize=20)
plt.ylabel("RMSE [eV/atom]",fontsize=17)
plt.xlabel("Regularization value ($\lambda$)",fontsize=17)
plt.tight_layout()
plt.savefig("lin_lam.png")
plt.show()
#GAUSS
lambd = [0.001,0.01,0.1]
ktype="gauss"
data=pandas.read_csv(folder+ktype+'/'+prdf+'.csv', sep=', ', engine='python')
names=list(data)
plt.figure(1)
# Enable interactive mode
plt.ion()
plt.title("Gaussian kernel with GP description", fontsize=20)
plt.xlabel("Sigma value", fontsize=17)
plt.ylabel("RMSE [eV/atom]", fontsize=17)
# Draw the grid lines
plt.grid(True)
for l in lambd:
results = data.loc[data[names[0]] == l]
x=np.asarray(results[results.columns[1]])
sort = x.argsort()
y=np.asarray(results[results.columns[3]])
plt.plot(x[sort],y[sort], marker='x', linestyle='dashed', linewidth=2, markersize=8, label="using lambda = "+str(l))
plt.ylim(0.2,0.7)
plt.legend(loc='lower right', fontsize=17)
plt.savefig("kernel_gauss.png")
plt.show()
plt.figure(4)
# Enable interactive mode
plt.ion()
plt.title("Gaussian kernel with GP description", fontsize=20)
plt.ylabel("RMSE [eV/atom]", fontsize=17)
# Draw the grid lines
plt.grid(True)
c=5
results = data.loc[data[names[1]] == c]
x=np.asarray(results[results.columns[0]])
sort = x.argsort()
y1=np.asarray(results[results.columns[3]])
y2=np.asarray(results[results.columns[2]])
plt.plot(x[sort],y1[sort], marker='x', linestyle='dashed', linewidth=2, markersize=8, label="Cross-validation set")
plt.plot(x[sort],y2[sort], marker='x', linestyle='dashed', linewidth=2, markersize=8, label="Training set")
plt.legend(loc='lower right', fontsize=17)
plt.title("Gaussian kernel with GP description", fontsize=20)
plt.xlabel("Regularization value ($\lambda$)",fontsize=17)
plt.xscale('log')
plt.savefig("gauss_lam.png")
plt.show()
#Laplace
ktype="laplace"
data=pandas.read_csv(folder+ktype+'/'+prdf+'.csv', sep=', ', engine='python')
names=list(data)
plt.figure(2)
# Enable interactive mode
plt.ion()
plt.title("Laplacian kernel with GP description")
plt.xlabel("Sigma value")
plt.ylabel("RMSE [eV/atom]",fontsize=17)
# Draw the grid lines
plt.grid(True)
for l in lambd:
results = data.loc[data[names[0]] == l]
x=np.asarray(results[results.columns[1]])
sort = x.argsort()
y=np.asarray(results[results.columns[3]])
plt.plot(x[sort],y[sort], marker='x', linestyle='dashed', linewidth=2, markersize=8, label="using lambda = "+str(l))
plt.ylim(0.2,0.7)
plt.legend(loc='upper left', fontsize=17)
plt.savefig("kernel_lap.png")
plt.show()
#TRAINING AND VALIDATION SET PLOTS
plt.figure(5)
# Enable interactive mode
plt.ion()
plt.title("Laplacian kernel with GP description", fontsize=20)
# Draw the grid lines
plt.grid(True)
c=5
results = data.loc[data[names[1]] == c]
x=np.asarray(results[results.columns[0]])
sort = x.argsort()
y1=np.asarray(results[results.columns[3]])
y2=np.asarray(results[results.columns[2]])
#!/usr/bin/env python
import InstrumentDriver
import numpy as np
import configparser
from pathlib import Path
from numpy import genfromtxt
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements Z-Crosstalk Compensation"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# init variables
def updateMatrix(self):
"""Set matrix elements from the csv file
"""
# return directly if not in use
path = self.getValue('Crosstalk Matrix')
full_matrix = genfromtxt(path, delimiter=',')
n = int(self.getValue('Number of Z-Control Lines'))
matrix = full_matrix[1:, 1:]
if (matrix.shape[0] != n):
raise ValueError("Matrix File qubit number does not equal Number of Z-Control Lines")
for i in range(n):
for j in range(n):
self.setValue('M'+str(i+1)+ '-' +str(j+1), matrix[i, j])
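# Minimal sketch of the assumed crosstalk CSV layout: a label row and a
# label column framing the n x n matrix, which full_matrix[1:, 1:] strips:
#
#   ,Q1,Q2
#   Q1,1.0,0.02
#   Q2,0.05,1.0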
def updateTuningCurves(self):
path = self.getValue('Tuning Curves')
tuning_parameters = genfromtxt(path, delimiter=',')
n = int(self.getValue('Number of Z-Control Lines'))
fmax = np.zeros(n)
fmin = np.zeros(n)
V_0 = np.zeros(n)
#need to check the input dimension
if (tuning_parameters.shape[0] != n+1):
raise ValueError("Tuning Curve File qubit number does not equal Number of Z-Control Lines")
for i in range(n):
fmax[i] = tuning_parameters[i+1, 1]
fmin[i] = tuning_parameters[i+1, 2]
V_0[i] = tuning_parameters[i+1, 3]
self.setValue('f max q' + str(i+1), fmax[i])
self.setValue('f min q' + str(i+1), fmin[i])
self.setValue('V0 q' + str(i+1), V_0[i])
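# Assumed layout of the tuning-curve CSV (a header row, then one row per
# qubit matching the columns read above):
#
#   qubit,f max,f min,V0
#   1,6.0e9,5.2e9,0.05
#   2,5.8e9,5.0e9,-0.10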
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# do nothing, just return value
if (quant.name == 'Load Crosstalk Matrix'):
self.updateMatrix()
elif (quant.name == 'Load Tuning Curves'):
self.updateTuningCurves()
elif (quant.name == 'Confirm'):
self.freqToVoltage()
elif (quant.name == 'Do Conversion'):
self.doConversion()
elif ('f target' in quant.name):
self.freqToVoltage()
self.doConversion()
return value
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation"""
# check type of quantity
if quant.isVector():
pass
else:
if (quant.name == 'Confirm'):
self.freqToVoltage()
elif 'f target' in quant.name:
self.freqToVoltage()
self.doConversion()
# for all other cases, do nothing
value = quant.getValue()
return value
def freqToVoltage(self):
"""Perform the non linear frequency and voltage conversion, used in the conversion matrix"""
n = int(self.getValue('Number of Z-Control Lines'))
f_max = np.zeros(n)
# License: BSD 3 clause
import itertools
import sys
import warnings
import numpy as np
from numpy.polynomial.legendre import leggauss
from scipy.linalg import solve
from tick.base import Base, ThreadPool
from tick.hawkes.inference.build.hawkes_inference import (PointProcessCondLaw)
# noinspection PyPep8Naming
class HawkesConditionalLaw(Base):
"""This class is used for performing non parametric estimation of
multi-dimensional marked Hawkes processes based on conditional laws.
Marked Hawkes processes are point processes defined by the intensity:
.. math::
\\forall i \\in [1 \\dots D], \\quad
\\lambda_i = \\mu_i + \\sum_{j=1}^D \\int \\phi_{ij} * f_{ij}(v_j) dN_j
where
* :math:`D` is the number of nodes
* :math:`\\mu_i` are the baseline intensities
* :math:`\\phi_{ij}` are the kernels
* :math:`v_j` are the marks (considered iid) of the process :math:`N_j`
* :math:`f_{ij}` are the mark functions, supposed to be piece-wise constant
on intervals :math:`I^j(l)`
The estimation is made from empirical computations of
.. math::
\\lim_{\\epsilon \\rightarrow 0}
E [ (N_i[t + lag + \\delta + \\epsilon] -
N_i[t + lag + \\epsilon]) | N_j[t]=1
\\quad \\& \\quad
v_j(t) \\in I^j(l) ]
For all the possible values of :math:`i`, :math:`j` and :math:`l`.
The :math:`lag` is sampled on a uniform grid defined by
:math:`\\delta`: :math:`lag = n * \\delta`.
Estimation can be performed using several realizations.
Parameters
----------
claw_method : {'lin', 'log'}, default='lin'
Specifies the way the conditional laws are sampled. It can be either:
* 'lin' : sampling is linear on [0, max_lag] using sampling period
delta_lag
* 'log' : sampling is semi-log. It uses linear sampling on [0, min_lag]
with sampling period delta_lag and log sampling on [min_lag, max_lag]
using :math:`\\exp(\\delta)` sampling period.
delta_lag : `float`, default=0.1
See claw_methods
min_lag : `float`, default=1e-4
See claw_methods
max_lag : `float`, default=40
See claw_methods
quad_method : {'gauss', 'lin', 'log'}, default=gauss
Sampling used for quadrature
* 'gauss' for gaussian quadrature
* 'lin' for linear quadrature
* 'log' for log quadrature
min_support : `float`, default=1e-4
Start value of kernel estimation. It is used for 'log' quadrature
method only, otherwise it is set to 0.
max_support : `float`, default=40
End value of kernel estimation
n_quad : `int` : default=50
The number of quadrature points between [min_support, max_support]
used for solving the system.
Be aware that the complexity increase as this number squared.
n_threads : `int`, default=1
Number of threads used for parallel computation.
* if `int <= 0`: the number of physical cores available on the CPU
* otherwise the desired number of threads
Other Parameters
----------------
delayed_component : list of `int`, shape=(n_nodes, ), default=None
list of node indices corresponding to node that should be delayed
(to avoid simultaneous jumps of different components which can be a
problem in the estimation)
delay : `float`
The delayed used for `delayed_component`. Selected components are
all delayed with the same value
marked_components : `dict`
A dictionary that indicates which component is considered as marked
and what are the corresponding intervals ``I_j(l)``
Attributes
----------
n_nodes : `int`
Number of nodes of the estimated Hawkes process
n_realizations : `int`
Number of given realizations
baseline : np.ndarray, shape=(n_nodes,)
Estimation of the baseline
kernels_norms : np.ndarray, shape=(n_nodes, n_nodes)
L1 norm matrix of the kernel norms
kernels : list of list
Kernel's estimation on the quadrature points
mean_intensity : list of `float`
The estimated mean intensity
symmetries1d : list of 2-tuple
List of component index pairs for imposing symmetries on the mean
intensity (e.g., ``[(0,1),(2,3)]`` means that the mean intensity of
the components 0 and 1 must be the same, and the mean intensities of
components 2 and 3 as well).
Can be set using the `set_model` method.
symmetries2d : list of 2-tuple of 2-tuple
List of kernel coordinate pairs for imposing symmetries on the kernel
matrix (e.g., ``[[(0,0),(1,1)],[(1,0),(0,1)]]`` for a bidiagonal
kernel in dimension 2).
Can be set using the `set_model` method.
mark_functions : list of 2-tuple
The mark functions as a list (lexical order on i,j and l, see below)
References
----------
Bacry, E., & Muzy, J. F. (2014).
Second order statistics characterization of Hawkes processes and
non-parametric estimation. `arXiv preprint arXiv:1401.0903`_.
.. _arXiv preprint arXiv:1401.0903: https://arxiv.org/pdf/1401.0903.pdf
"""
_attrinfos = {
'_hawkes_object': {},
'_lags': {},
'_lock': {
'writable': False
},
'_phi_ijl': {},
'_norm_ijl': {},
'_ijl2index': {},
'_index2ijl': {},
'_n_index': {},
'_mark_probabilities': {},
'_mark_probabilities_N': {},
'_mark_min': {},
'_mark_max': {},
'_lam_N': {},
'_lam_T': {},
'_claw': {},
'_claw1': {},
'_claw_X': {},
'_n_events': {},
'_int_claw': {},
'_IG': {},
'_IG2': {},
'_quad_x': {},
'_quad_w': {}
}
def __init__(self, delta_lag=.1, min_lag=1e-4, max_lag=40, n_quad=50,
max_support=40, min_support=1e-4, quad_method='gauss',
marked_components=None, delayed_component=None, delay=0.00001,
model=None, n_threads=1, claw_method='lin'):
Base.__init__(self)
# Init the claw sampling parameters
self.delta_lag = delta_lag
self.max_lag = max_lag
self.min_lag = min_lag
self.claw_method = claw_method
# Init quadrature method
self.quad_method = quad_method
self.n_quad = n_quad
self.min_support = min_support
self.max_support = max_support
# Init marked components
if marked_components is None:
marked_components = dict()
self.marked_components = marked_components
# Init attributes
self.n_realizations = 0
self._lags = None
self._compute_lags()
self.symmetries1d = []
self.symmetries2d = []
self.delayed_component = np.array(delayed_component) if delayed_component is not None else None
self.delay = delay
# _claw : list of 2-tuple
# Represents the conditional laws written above (lexical order on i,
# j and l, see below). Each conditional law is represented by a
# pair (x, c) where x are the abscissa
self._claw = None
# _claw1 : list of list
# Represents the conditional laws written above without conditioning by
# the mark (so a i,j list)
self._claw1 = None
self._lock = None
# quad_x : `np.ndarray`, shape=(n_quad, )
# The abscissa of the quadrature points used for the Fredholm system
self._quad_x = None
# quad_w : `np.ndarray`, shape=(n_quad, )
# The weights the quadrature points used for the Fredholm system
self._quad_w = None
self._phi_ijl, self._norm_ijl = None, None
self.kernels, self.kernels_norms, self.baseline = None, None, None
self.mark_functions = None
if n_threads == -1:
import multiprocessing
n_threads = multiprocessing.cpu_count()
self.n_threads = n_threads
if model:
self.set_model(model)
def fit(self, events: list, T=None):
"""Fit the model according to the given training data.
Parameters
----------
events : `list` of `list` of `np.ndarray`
List of Hawkes processes realizations.
Each realization of the Hawkes process is a list of n_node for
each component of the Hawkes. Namely `events[i][j]` contains a
one-dimensional `numpy.array` of the events' timestamps of
component j of realization i.
If only one realization is given, it will be wrapped into a list
T : `double`, default=None
The duration (in physical time) of the realization. If it is None then
T is considered to be the time of the last event (of any component).
Returns
-------
output : `HawkesConditionalLaw`
The current instance of the Learner
"""
if not isinstance(events[0][0], np.ndarray):
events = [events]
for timestamps in events:
self.incremental_fit(timestamps, compute=False, T=T)
self.compute()
return self
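# Example `events` for a 2-node unmarked process, single realization
# (timestamps only; incremental_fit wraps them with unit cumulative marks):
#   events = [np.array([1.2, 3.4]), np.array([2.0, 2.5, 7.8])]
#   learner.fit(events, T=10.)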
def set_model(self, symmetries1d=list(), symmetries2d=list(),
delayed_component=None):
"""Set the model to be used.
Parameters
----------
symmetries1d : list of 2-tuple
List of component index pairs for imposing symmetries on the mean
intensity (e.g., ``[(0,1),(2,3)]`` means that the mean intensity of
the components 0 and 1 must be the same, and the mean intensities of
components 2 and 3 as well).
symmetries2d : list of 2-tuple of 2-tuple
List of kernel coordinate pairs for imposing symmetries on the kernel
matrix (e.g., ``[[(0,0),(1,1)],[(1,0),(0,1)]]`` for a bidiagonal
kernel in dimension 2).
delayed_component : list of `int`, shape=(N, ), default=`None`
list of node indices corresponding to node that should be delayed
(to avoid simultaneous jumps of different components which can be a
problem in the estimation)
If no model is specified then default values for these fields are used
Notes
-----
We set the symmetries, the kernel names and delayed components for
first realization only
"""
self.symmetries1d = symmetries1d
self.symmetries2d = symmetries2d
self.delayed_component = np.array(delayed_component) if delayed_component is not None else None
def _init_basics(self, realization):
"""Init the dimension
"""
self.n_nodes = len(realization)
return realization
def _init_marked_components(self):
"""Init marked components
This builds the field self.marked_components so that it is set to
[component1_mark_intervals, ..., componentN_mark_intervals]
where each componentj_mark_intervals is of the form
[[min1, max1], [min2, max2], ..., [mink, maxk]]
It describes the intervals the function f^ij are constants on.
"""
marked_components = self.marked_components
self.marked_components = []
for i in range(0, self.n_nodes):
self.marked_components.append([])
if i in marked_components:
self.marked_components[i].append(
[-sys.float_info.max, marked_components[i][0]])
for j in range(0, len(marked_components[i]) - 1):
self.marked_components[i].append(
marked_components[i][j:j + 2])
self.marked_components[i].append(
[marked_components[i][-1], sys.float_info.max])
else:
self.marked_components[i].append(
[-sys.float_info.max, sys.float_info.max])
def _init_index(self):
"""Init for indexing
Given i,j,l --> index and vice versa (i and j are components of the
Hawkes and l is the marked interval index of the component j)
"""
self._ijl2index = []
self._index2ijl = []
index = 0
for i in range(0, self.n_nodes):
self._ijl2index.append([])
for j in range(0, self.n_nodes):
self._ijl2index[i].append([])
for l in range(0, len(self.marked_components[j])):
self._ijl2index[i][j].append(index)
self._index2ijl.append((i, j, l))
index += 1
self._n_index = len(self._index2ijl)
def _init_mark_stats(self):
"""We initialize the mark probabilities and min-max of the marks
"""
# Proba for the mark
self._mark_probabilities = []
# In order to compute the probability we need to store the number of
# events
self._mark_probabilities_N = []
self._mark_min = [sys.float_info.max] * self.n_nodes
self._mark_max = [sys.float_info.min] * self.n_nodes
for i in range(0, self.n_nodes):
self._mark_probabilities_N.append(
[0] * len(self.marked_components[i]))
self._mark_probabilities.append(
[0] * len(self.marked_components[i]))
def _init_lambdas(self):
"""Init the lambda's
"""
self.mean_intensity = [0] * self.n_nodes
self._lam_N = [0] * self.n_nodes
self._lam_T = [0] * self.n_nodes
# Used to store the number of events of each component that
# have been used to perform estimation on all the lags
# versus the number of events that could not be used for all the lags
# Warning : we don't take care of marks for this computation
# normally we should do this computation independently for each mark
self._n_events = np.zeros((2, self.n_nodes))
def _init_claws(self):
"""Init the claw storage
"""
self._claw = [0] * len(self._index2ijl)
def _index_to_lexical(self, index):
"""Convert index to lexical order (i,j,l)
Parameters
----------
index : `int`
Returns
-------
i : `int`
First node of the Hawkes
j : `int`
Second node of the Hawkes
l : `int`
Marked interval index of the component j
Examples
--------
>>> from tick.hawkes import HawkesConditionalLaw
>>> import numpy as np
>>> learner = HawkesConditionalLaw()
>>> learner.incremental_fit([np.array([2.1, 3, 4]),
... np.array([2., 2.01, 8])],
... compute=False)
>>> learner._index_to_lexical(2)
(1, 0, 0)
"""
return self._index2ijl[index]
def _lexical_to_index(self, i, j, l):
"""Convert lexical order (i,j,l) to index
Parameters
----------
i : `int`
First node of the Hawkes
j : `int`
Second node of the Hawkes
l : `int`
Marked interval index of the component j
Returns
-------
index : `int`
Examples
--------
>>> from tick.hawkes import HawkesConditionalLaw
>>> import numpy as np
>>> learner = HawkesConditionalLaw()
>>> learner.incremental_fit([np.array([2.1, 3, 4]),
... np.array([2., 2.01, 8])],
... compute=False)
>>> learner._lexical_to_index(1, 0, 0)
2
"""
return self._ijl2index[i][j][l]
def incremental_fit(self, realization, T=None, compute=True):
"""Allows to add some more realizations before estimation is performed.
It updates the conditional laws (stored in `self._claw` and
`self._claw1`) and of the mean intensity (in `self._mean_intensity`).
Parameters
----------
realization : list of `np.narrays` or list of 2-tuple of `np.arrays`
* list of `np.narrays`, shape=(N) , representing the arrival times
of each component
* list of pairs (t,m) np.arrays representing the arrival times of
each component (x) and the cumulative marks signal (m)
T : `double`, default=None
The duration (in physical time) of the realization. If it is None then
T is considered to be the time of the last event (of any component).
compute : `bool`, default=`True`
Computes kernel estimation. If set to `False`, you will have to
manually call `compute` method afterwards.
This is useful to add multiple realizations and compute only once
all conditional laws have been updated.
"""
# If first realization we perform some init
if self.n_realizations == 0:
realization = self._init_basics(realization)
self._init_marked_components()
self._init_index()
self._init_mark_stats()
self._init_lambdas()
self._init_claws()
else:
if compute and self._has_been_computed_once():
warnings.warn(("compute() method was already called, "
"computed kernels will be updated."))
# We perform some checks
if self.n_nodes != len(realization):
msg = 'Bad dimension for realization, should be %d instead of %d' \
% (self.n_nodes, len(realization))
raise ValueError(msg)
# Realization normalization
if not isinstance(realization[0], (list, tuple)):
realization = [(r, np.arange(len(r), dtype=np.double) + 1)
for r in realization]
# Do we need to delay the realization ?
if self.delayed_component is not None:
old_realization = realization
realization = []
for i in range(0, self.n_nodes):
if any(self.delayed_component == i):
if len(old_realization[i][0]) == 0:
realization.append(old_realization[i])
else:
realization.append((old_realization[i][0] + self.delay,
old_realization[i][1]))
else:
realization.append(old_realization[i])
# We compute last event time
last_event_time = -1
for i in range(0, self.n_nodes):
if len(realization[i][0]) > 0:
last_event_time = max(realization[i][0][-1], last_event_time)
# If realization empty --> return
if last_event_time < 0:
warnings.warn(
"An empty realization was passed. No computation was performed."
)
return
# We set T if needed
if T is None:
T = last_event_time
elif T < last_event_time:
raise ValueError("Argument T (%g) specified is too small, "
"you should use default value or a value "
"greater or equal to %g." % (T, last_event_time))
# We update the mark probabilities and min-max
for i in range(0, self.n_nodes):
if len(realization[i][0]) == 0:
continue
# We have to take into account the first mark
der = np.hstack([realization[i][1][0], np.diff(realization[i][1])])
total = 0
self._mark_min[i] = min(self._mark_min[i], np.min(der))
self._mark_max[i] = max(self._mark_max[i], np.max(der))
for l, interval in enumerate(self.marked_components[i]):
self._mark_probabilities_N[i][l] += \
np.sum((der >= interval[0]) & (der < interval[1]))
total += self._mark_probabilities_N[i][l]
for l, interval in enumerate(self.marked_components[i]):
self._mark_probabilities[i][l] = \
self._mark_probabilities_N[i][l] / total
der[:] = 1
# We update the Lambda
for i in range(0, self.n_nodes):
if len(realization[i][0]) <= 0:
continue
self._lam_N[i] += len(realization[i][0])
self._lam_T[i] += T
self.mean_intensity[i] = self._lam_N[i] / self._lam_T[i]
# We update the _n_events of component i
# Warning : we don't take care of marks for this computation
# normally we should do this computation independently for each mark
for i in range(0, self.n_nodes):
good = np.sum(realization[i][0] <= T - self._lags[-1])
bad = len(realization[i][0]) - good
self._n_events[0, i] += good
self._n_events[1, i] += bad
# We might want to use threads, since this is the time consuming part
with_multi_processing = self.n_threads > 1
if with_multi_processing:
pool = ThreadPool(with_lock=True, max_threads=self.n_threads)
self._set('_lock', pool.lock)
for index, (i, j, l) in enumerate(self._index2ijl):
if with_multi_processing:
pool.add_work(self._PointProcessCondLaw, realization, index, i,
j, l, T)
else:
self._PointProcessCondLaw(realization, index, i, j, l, T)
if with_multi_processing:
pool.start()
# Here we compute the G^ij (not conditioned to l)
# It is recomputed each time
self._claw1 = []
for i in range(0, self.n_nodes):
self._claw1.append([])
for j in range(0, self.n_nodes):
index = self._ijl2index[i][j][0]
self._claw1[i].append(np.copy(self._claw[index]))
self._claw1[i][j] *= self._mark_probabilities[j][0]
for l in range(1, len(self._ijl2index[i][j])):
index = self._ijl2index[i][j][l]
self._claw1[i][j] += self._claw[index] * \
self._mark_probabilities[j][l]
self.n_realizations += 1
# Deal with symmetrization
for (i, j) in self.symmetries1d:
t = (self.mean_intensity[i] + self.mean_intensity[j]) / 2
self.mean_intensity[i] = t
self.mean_intensity[j] = t
t = (self._mark_min[i] + self._mark_min[j]) / 2
self._mark_min[i] = t
self._mark_min[j] = t
t = (self._mark_max[i] + self._mark_max[j]) / 2
self._mark_max[i] = t
self._mark_max[j] = t
if self.marked_components[i] != self.marked_components[j]:
continue
for l in range(0, len(self.marked_components[i])):
t = (self._mark_probabilities_N[i][l] +
self._mark_probabilities_N[j][l]) / 2
self._mark_probabilities_N[i][l] = t
self._mark_probabilities_N[j][l] = t
t = (self._mark_probabilities[i][l] +
self._mark_probabilities[j][l]) / 2
self._mark_probabilities[i][l] = t
self._mark_probabilities[j][l] = t
for ((i1, j1), (i2, j2)) in self.symmetries2d:
t = (self._claw1[i1][j1] + self._claw1[i2][j2]) / 2
self._claw1[i1][j1] = t
self._claw1[i2][j2] = t
if self.marked_components[j1] != self.marked_components[j2]:
continue
for l in range(0, len(self.marked_components[j1])):
index1 = self._ijl2index[i1][j1][l]
index2 = self._ijl2index[i2][j2][l]
t = (self._claw[index1] + self._claw[index2]) / 2
self._claw[index1] = t
self._claw[index2] = t
# We can remove the thread lock (lock disallows pickling)
self._set('_lock', None)
if compute:
self.compute()
def _PointProcessCondLaw(self, realization, index, i, j, l, T):
claw_X = np.zeros(len(self._lags) - 1)
claw_Y = np.zeros(len(self._lags) - 1)
lambda_i = len(realization[i][0]) / T
PointProcessCondLaw(
realization[i][0], realization[j][0], realization[j][1],
self._lags, self.marked_components[j][l][0],
self.marked_components[j][l][1], T, lambda_i, claw_X, claw_Y)
self._claw_X = claw_X
# TODO: this lock acquire is very expensive here
if self.n_threads > 1:
self._lock.acquire()
# Update claw
if self.n_realizations == 0:
self._claw[index] = claw_Y
else:
self._claw[index] *= self.n_realizations
self._claw[index] += claw_Y
self._claw[index] /= self.n_realizations + 1
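# i.e. a running average over realizations:
# claw <- (n_realizations * claw + claw_Y) / (n_realizations + 1)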
# Unlock
if self.n_threads > 1:
self._lock.release()
def _compute_lags(self):
"""Computes the lags at which the claw will be computed
"""
claw_method = self.claw_method
# computes the claw either on a uniform grid (lin) or a semi log
# uniform grid (log)
if claw_method == "log":
y1 = np.arange(0., self.min_lag, self.min_lag * self.delta_lag)
y2 = np.exp(
np.arange(
np.log(self.min_lag), np.log(self.max_lag),
self.delta_lag))
self._lags = np.append(y1, y2)
if claw_method == "lin":
self._lags = np.arange(0., self.max_lag, self.delta_lag)
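# For example, with delta_lag=0.1, min_lag=1e-4 and max_lag=40:
#   'lin' -> 0.0, 0.1, 0.2, ..., 39.9
#   'log' -> linear steps of 1e-5 on [0, 1e-4), then 1e-4 * exp(0.1 * k)
#            up to 40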
def _compute_ints_claw(self):
"""Computes the claw and its integrals at the difference of
quadrature points using a linear interpolation
"""
self._int_claw = [0] * self._n_index
# Builds a linear interpolation of the claws at the difference of
# quadrature (only positive abscissa are kept)
for index in range(self._n_index):
xe = self._claw_X
ye = self._claw[index]
xs2 = np.array(
[(a - b)
for (a, b) in itertools.product(self._quad_x, repeat=2)])
xs2 = np.append(xe, xs2)
xs2 = np.append(self._quad_x, xs2)
xs2 = np.unique(xs2)  # np.unique also sorts
xs2 = xs2[xs2 >= 0.]
ys2 = np.zeros(len(xs2))
j = 0
for i in range(1, len(xe)):
while j < len(xs2) and xs2[j] < xe[i]:
ys2[j] = (ye[i - 1]) + ((ye[i]) - (ye[i - 1])) * (
xs2[j] - xe[i - 1]) / (xe[i] - xe[i - 1])
j += 1
sc = (xs2, ys2)
self._int_claw[index] = sc
# Computes the integrals of the claws (IG) and the integrals of x
# times the claws from 0 to the abscissa we have just computed
self._IG = []
self._IG2 = []
for i in range(self._n_index):
xc = self._int_claw[i][0]
yc = self._int_claw[i][1]
iyc_IG = np.append(
np.array(0.), np.cumsum(np.diff(xc) * (yc[:-1] + yc[1:]) / 2.))
self._IG += [(xc, iyc_IG)]
iyc_IG2 = np.append(
np.array(0.),
np.cumsum((yc[:-1] + yc[1:]) / 2. * np.diff(xc) * xc[:-1] +
np.diff(xc) * np.diff(xc) / 3. * np.diff(yc) +
np.diff(xc) * np.diff(xc) / 2. * yc[:-1]))
self._IG2 += [(xc, iyc_IG2)]
@staticmethod
def _lin0(sig, t):
"""Find closest value of a signal, zero value border
"""
x, y = sig
if t >= x[-1]:
return 0
index = np.searchsorted(x, t)
if index == len(y) - 1:
return y[index]
elif np.abs(x[index] - t) < np.abs(x[index + 1] - t):
return y[index]
else:
return y[index + 1]
@staticmethod
def _linc(sig, t):
"""Find closest value of a signal, continuous border
"""
x, y = sig
if t >= x[-1]:
return y[-1]
index = np.searchsorted(x, t)
if np.abs(x[index] - t) < np.abs(x[index + 1] - t):
return y[index]
else:
return y[index + 1]
def _G(self, i, j, l, t):
"""Returns the value of a claw at a point
Used to fill V and M with 'gauss' method
"""
if t < 0:
warnings.warn("G(): should not be called for t < 0")
index = self._ijl2index[i][j][l]
return HawkesConditionalLaw._lin0(self._int_claw[index], t)
def _DIG(self, i, j, l, t1, t2):
"""Returns the integral of a claw between t1 and t2
"""
if t1 >= t2:
warnings.warn("t2>t1 wrong in DIG")
index = self._ijl2index[i][j][l]
return HawkesConditionalLaw._linc(self._IG[index], t2) - \
HawkesConditionalLaw._linc(self._IG[index], t1)
def _DIG2(self, i, j, l, t1, t2):
"""Returns the integral of x times a claw between t1 and t2
"""
if t1 >= t2:
warnings.warn("t2>t1 wrong in DIG2")
index = self._ijl2index[i][j][l]
return HawkesConditionalLaw._linc(self._IG2[index], t2) - \
HawkesConditionalLaw._linc(self._IG2[index], t1)
def compute(self):
"""Computes kernel estimation by solving a Fredholm system.
"""
# We raise an exception if a claw component had no input to be computed
if any(self._n_events[0, :] == 0):
k = np.where(self._n_events[0, :] == 0)[0]
msg = "Cannot run estimation : not enough events for components {}" \
.format(k)
raise ValueError(msg)
# Here we compute the quadrature points and the corresponding weights
# self.quad_x and self.quad_w
if self.quad_method in {'gauss', 'gauss-'}:
self._quad_x, self._quad_w = leggauss(self.n_quad)
self._quad_x = self.max_support * (self._quad_x + 1) / 2
self._quad_w *= self.max_support / 2
elif self.quad_method == 'log':
logstep = (np.log(self.max_support) - np.log(
self.min_support) + 1.) / \
self.n_quad
x1 = np.arange(0., self.min_support, self.min_support * logstep)
x2 = np.exp(
np.arange(
np.log(self.min_support), np.log(self.max_support),
logstep))
self._quad_x = np.append(x1, x2)
self._quad_w = self._quad_x[1:] - self._quad_x[:-1]
self._quad_w = np.append(self._quad_w, self._quad_w[-1])
self.n_quad = len(self._quad_x)
self._quad_x = np.array(self._quad_x)
self._quad_w = np.array(self._quad_w)
elif self.quad_method == 'lin':
x1 = np.arange(0., self.max_support,
self.max_support / self.n_quad)
self._quad_x = x1
self._quad_w = self._quad_x[1:] - self._quad_x[:-1]
self._quad_w = np.append(self._quad_w, self._quad_w[-1])
self.n_quad = len(self._quad_x)
self._quad_x = np.array(self._quad_x)
self._quad_w = np.array(self._quad_w)
# Computes the claw and its integrals at the difference of
# quadrature points using a linear interpolation
self._compute_ints_claw()
# For each i we write and solve the system V = M PHI
index_first = 0
self._phi_ijl = []
self._norm_ijl = []
self.kernels = []
self.kernels_norms = np.zeros((self.n_nodes, self.n_nodes))
for i in range(0, self.n_nodes):
# We must compute the last valid index which corresponds to i
index_last = index_first
for index_last in range(index_first, self._n_index):
(i1, j1, l1) = self._index2ijl[index_last]
if i1 != i:
index_last -= 1
break
# Number of indices corresponding to i
n_index = index_last - index_first + 1
# Compute V and M
V = self._compute_V(i, n_index, self.n_quad, index_first,
index_last)
M = self._compute_M(n_index, self.n_quad, index_first, index_last,
self.quad_method)
# Then we solve it
res = solve(M, V)
self._estimate_kernels_and_norms(i, index_first, index_last, res,
self.n_quad, self.quad_method)
index_first = index_last + 1
self._estimate_baseline()
self._estimate_mark_functions()
def _compute_V(self, i, n_index, n_quad, index_first, index_last):
V = np.zeros((n_index * n_quad, 1))
for index in range(index_first, index_last + 1):
(x, j, l) = self._index2ijl[index]
for n in range(0, n_quad):
index_i_quad = (index - index_first) * n_quad + n
V[index_i_quad] = self._G(i, j, l, self._quad_x[n])
return V
def _compute_M(self, n_index, n_quad, index_first, index_last, method):
M = np.zeros((n_index * n_quad, n_index * n_quad))
for index in range(index_first, index_last + 1):
(x, j, l) = self._index2ijl[index]
for index1 in range(index_first, index_last + 1):
(i1, j1, l1) = self._index2ijl[index1]
fact = self.mean_intensity[j1] / self.mean_intensity[j]
for n in range(0, n_quad):
for n1 in range(0, n_quad):
if method == 'gauss' or method == 'gauss-':
self._fill_M_for_gauss(M, method, n_quad,
index_first, index, index1,
j, l, j1, l1, fact, n, n1)
elif method == 'log' or method == 'lin':
self._fill_M_for_log_lin(
M, method, n_quad, index_first, index, index1,
j, l, j1, l1, fact, n, n1)
return M
def _fill_M_for_gauss(self, M, method, n_quad, index_first, index, index1,
j, l, j1, l1, fact, n, n1):
def x_value(n_lower, n_greater, j_lower, j_greater, l_greater):
return self._mark_probabilities[j1][l1] * self._quad_w[n1] * \
self._G(j_lower, j_greater, l_greater,
self._quad_x[n_greater] - self._quad_x[n_lower])
if n > n1:
x = x_value(n1, n, j1, j, l)
elif n < n1:
x = fact * x_value(n, n1, j, j1, l1)
else:
if method == 'gauss-':
x = 0
else:
x1 = x_value(n1, n, j1, j, l)
x2 = fact * x_value(n, n1, j, j1, l1)
x = (x1 + x2) / 2
if method == 'gauss-':
row = (index - index_first) * n_quad + n
col = (index1 - index_first) * n_quad + n
M[row, col] -= x
if l == l1 and j == j1 and n == n1:
x += 1
row = (index - index_first) * n_quad + n
col = (index1 - index_first) * n_quad + n1
M[row, col] += x
def _fill_M_for_log_lin(self, M, method, n_quad, index_first, index,
index1, j, l, j1, l1, fact, n, n1):
mark_probability = self._mark_probabilities[j1][l1]
ratio_dig = lambda n_q: ((self._quad_x[n] - self._quad_x[n_q]) / self._quad_w[n_q])
ratio_dig2 = lambda n_q: 1. / self._quad_w[n_q]
dig_arg_greater = lambda n_q: (j1, j, l, self._quad_x[n] - self._quad_x[n_q] - self._quad_w[n_q], self._quad_x[n] - self._quad_x[n_q])
dig_arg_lower = lambda n_q: (j, j1, l1, self._quad_x[n_q] - self._quad_x[n], self._quad_x[n_q] - self._quad_x[n] + self._quad_w[n_q])
x = 0
if n > n1:
x += mark_probability * self._DIG(*dig_arg_greater(n1))
if n1 < n_quad - 1:
x -= ratio_dig(n1) * mark_probability * \
self._DIG(*dig_arg_greater(n1))
x += ratio_dig2(n1) * mark_probability * \
self._DIG2(*dig_arg_greater(n1))
if n1 > 0:
x += ratio_dig(n1 - 1) * mark_probability * \
self._DIG(*dig_arg_greater(n1 - 1))
x -= ratio_dig2(n1 - 1) * mark_probability * \
self._DIG2(*dig_arg_greater(n1 - 1))
elif n < n1:
x += fact * mark_probability * self._DIG(*dig_arg_lower(n1))
if n1 < n_quad - 1:
x -= fact * ratio_dig(n1) * mark_probability * \
self._DIG(*dig_arg_lower(n1))
x -= fact * ratio_dig2(n1) * mark_probability * \
self._DIG2(*dig_arg_lower(n1))
if n1 > 0:
x += fact * ratio_dig(n1 - 1) * mark_probability * \
self._DIG(*dig_arg_lower(n1 - 1))
x += fact * ratio_dig2(n1 - 1) * mark_probability * \
self._DIG2(*dig_arg_lower(n1 - 1))
elif n == n1:
x += fact * self._mark_probabilities[j1][l1] * \
self._DIG(*dig_arg_lower(n1))
if n1 < n_quad - 1:
x -= fact * ratio_dig(n1) * mark_probability * \
self._DIG(*dig_arg_lower(n1))
x -= fact * ratio_dig2(n1) * mark_probability * \
self._DIG2(*dig_arg_lower(n1))
if n1 > 0:
x += ratio_dig(n1 - 1) * mark_probability * \
self._DIG(*dig_arg_greater(n1 - 1))
x -= ratio_dig2(n1 - 1) * mark_probability * \
self._DIG2(*dig_arg_greater(n1 - 1))
if l == l1 and j == j1 and n == n1:
x += 1
row = (index - index_first) * n_quad + n
col = (index1 - index_first) * n_quad + n1
M[row, col] += x
def _estimate_kernels_and_norms(self, i, index_first, index_last, res,
n_quad, method):
# We rearrange the solution vector and compute the norms
# Here we get phi^ij_l and the corresponding norms
for index in range(index_first, index_last + 1):
y = res[(index - index_first) * n_quad:(index - index_first + 1) *
n_quad][:, 0]
self._phi_ijl.append((self._quad_x, y))
if method in {'gauss', 'gauss-'}:
self._norm_ijl.append(np.sum(y * self._quad_w))
elif method in {'log', 'lin'}:
# interpolation (the one we used in the scheme) norm
self._norm_ijl.append(
np.sum((y[:-1] + y[1:]) / 2. * self._quad_w[:-1]))
# Now we compute phi^ij and the corresponding norms
self.kernels.append([])
for j in range(0, self.n_nodes):
index = self._ijl2index[i][j][0]
self.kernels[i].append(
np.array(self._phi_ijl[index]) *
self._mark_probabilities[j][0])
self.kernels_norms[i, j] = self._norm_ijl[index] * \
self._mark_probabilities[j][0]
index += 1
for l in range(1, len(self.marked_components[j])):
self.kernels[i][j] += self._phi_ijl[index] * \
self._mark_probabilities[j][l]
self.kernels_norms[i, j] += self._norm_ijl[index] * \
self._mark_probabilities[j][l]
index += 1
def _estimate_baseline(self):
M = np.eye(self.n_nodes) - self.kernels_norms
self.baseline = np.dot(M, self.mean_intensity)
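# First-order moment relation of a stationary Hawkes process:
# Lambda = mu + ||phi|| Lambda, hence mu = (I - ||phi||) Lambda.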
def _estimate_mark_functions(self):
self.mark_functions = []
for i in range(0, self.n_nodes):
self.mark_functions.append([])
for j in range(0, self.n_nodes):
if len(self.marked_components[j]) == 1:
self.mark_functions[i].append((np.array([1]),
np.array([1])))
import os
import unittest
import numpy as np
from spatialnc import ipw
from topocalc.skew import adjust_spacing, skew
class TestSkew(unittest.TestCase):
def test_skew(self):
""" Test the skew of an image """
test_dir = os.path.dirname(os.path.abspath(__file__))
# read in the dem from the gold file as we're
# comparing the arrays at the end and it will
# ensure that the bit resolution is kept
infile = os.path.join(
test_dir, 'Lakes/gold_ipw/gold_dem.ipw')
d = ipw.IPW(infile)
gold_dem = d.bands[0].data
for angle in range(-45, 45, 5):
# Get the IPW gold skew values
gold = ipw.IPW(
os.path.join(
test_dir,
'Lakes/gold_ipw/skew/skew_{}.ipw'.format(angle)
)
)
gold_data = gold.bands[0].data
# skew the initial array
sarr = skew(gold_dem, angle=angle, fill_min=True)
self.assertTrue(np.array_equal(sarr, gold_data))
# skew it back to original
sbarr = skew(sarr, angle=angle, fwd=False)
self.assertTrue(np.array_equal(sbarr, gold_dem))
def test_skew_error(self):
"""Test error with angle"""
self.assertRaises(ValueError, skew, np.ones(10), -100)
self.assertRaises(ValueError, skew, np.ones(10), 100)
"""
Draw Figures - Chapter 4
This script generates all of the figures that appear in Chapter 4 of the textbook.
Ported from MATLAB Code
<NAME>
24 March 2021
"""
import utils
from utils.unit_conversions import lin_to_db, db_to_lin
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import scipy as sp
from scipy import stats
from scipy import fftpack
import seaborn as sns
import detector
def make_all_figures(close_figs=False):
"""
Call all the figure generators for this chapter
:param close_figs: Boolean flag. If True, close all figures after
generating them (useful for batch scripting). Default=False
:return: List of figure handles
"""
# Initializes colorSet - Mx3 RGB vector for successive plot lines
colors = plt.get_cmap("tab10")
# Reset the random number generator to ensure reproducibility
rng = np.random.default_rng(0)
# Find the output directory
prefix = utils.init_output_dir('chapter4')
# Activate seaborn for prettier plots
sns.set()
# Generate all figures
fig1a = make_figure_1a(prefix)
fig1b = make_figure_1b(prefix, rng)
fig2a = make_figure_2a(prefix, rng)
fig2b = make_figure_2b(prefix, rng)
fig3 = make_figure_3(prefix)
fig5 = make_figure_5(prefix, colors)
fig6 = make_figure_6(prefix, rng, colors)
fig7 = make_figure_7(prefix)
fig8 = make_figure_8(prefix, colors)
figs = [fig1a, fig1b, fig2a, fig2b, fig3, fig5, fig6, fig7, fig8]
if close_figs:
for fig in figs:
plt.close(fig)
return None
else:
plt.show()
return figs
def make_figure_1a(prefix=None):
"""
Figure 1a - Alternating Sine Waves
Ported from MATLAB Code
<NAME>
24 March 2021
:param prefix: output directory to place generated figure
:return: figure handle
"""
# Sine wave
num_points = 1024 # Sample points
y_chip = np.exp(1j*(np.pi/2+2*np.pi*np.arange(num_points)/num_points))
# String together multiple periods
code = np.array([0, 1, 1, 0, 1])
symbol = np.exp(1j*np.pi*code)
y_full = np.ravel(np.expand_dims(y_chip, axis=0)*np.expand_dims(symbol, axis=1))
# x axis
t_vec = np.arange(np.size(y_full))
fig1a = plt.figure()
plt.plot(t_vec, np.real(y_full), color='k', linewidth=0.5)
plt.plot(t_vec, np.zeros_like(t_vec), color='k', linewidth=0.5)
for idx, bit in enumerate(code):
plt.text(num_points/2 + num_points*idx-1, 1.5, '{}'.format(bit))
plt.plot(num_points*idx*np.array([1, 1]), np.array([-1, 2]), color='k', linestyle=':')
# Annotation
plt.annotate(text='', xy=(2*num_points, 1.1), xytext=(3*num_points, 1.1), arrowprops=dict(arrowstyle='<->'))
plt.text(2.35*num_points, 1.25, r'$T_{chip}$')
# Turn off the axes
ax = plt.gca()
ax.axis('off')
# Save figure
if prefix is not None:
plt.savefig(prefix + 'fig1a.svg')
plt.savefig(prefix + 'fig1a.png')
return fig1a
def make_figure_1b(prefix=None, rng=None):
"""
Figure 1b - Figure 1b, Bandwidth
Ported from MATLAB Code
<NAME>
24 March 2021
:param prefix: output directory to place generated figure
:param rng: random number generator
:return: figure handle
"""
if rng is None:
rng = np.random.default_rng()
num_samples = 16 # Number of samples per cycle
num_code_bits = 128 # Length of transmit code in bits
y_chip = np.exp(1j*(np.pi/2+2*np.pi*np.arange(num_samples)/num_samples))
num_code_samples = num_code_bits*num_samples
lo = np.exp(1j*2*np.pi*np.arange(num_code_samples)*4/num_samples) # 4 samples/cycle
num_monte_carlo = 100
spectral_average = np.zeros_like(lo)
for ii in range(num_monte_carlo):
# Generate a random code
code = rng.integers(low=0, high=2, size=(num_code_bits, 1)) # with random integers, the interval is [low, high)
symbol = np.exp(1j*np.pi*code)
# Random starting phase
starting_phase = np.exp(1j*rng.uniform(low=0, high=2*np.pi))
# Generate full transmit signal at the intermediate frequency (IF) of y_chip
signal_if = np.ravel(starting_phase*symbol*y_chip)
# Mix with the local oscillator (lo) to get the radio frequency (RF) sample
signal_rf = signal_if*lo
# Take the fourier transform
spectral_average += np.absolute(fftpack.fft(np.real(signal_rf)))
# Normalize, and use an fftshift to put the center frequency in the middle of the vector
spectral_average = fftpack.fftshift(spectral_average)/np.max(np.absolute(spectral_average))
fig1b = plt.figure()
plt.plot(np.linspace(start=-1, stop=1, num=num_code_samples),
2*lin_to_db(np.absolute(spectral_average)))
# Plot top and -3 dB lines
plt.plot([-1, 1], [0, 0], color='k', linestyle=':')
plt.plot([-1, 1], [-3, -3], color='k', linestyle=':')
plt.plot([0, 0], [-20, 0], color='k', linestyle='-')
plt.ylim([-40, 3])
# Create textbox
plt.text(-.4, -1.5, '3 dB')
plt.text(.3, 2, r'$f_0$')
    # Create double arrows
    f0 = 0.625  # signal sits at 5/16 of fs (1/16 from y_chip + 4/16 from lo), i.e. 0.625 on the Nyquist-normalized axis
bw = 0.125
    plt.annotate('', xy=(0, 1), xytext=(0.64, 1), arrowprops=dict(arrowstyle='<->', color='k'))
    plt.annotate('', xy=(-.5, 0), xytext=(-.5, -3), arrowprops=dict(arrowstyle='<->', color='k'))
    plt.annotate('', xy=(f0-bw/2, -3), xytext=(f0+bw/2, -3), arrowprops=dict(arrowstyle='<->', color='k'))
    plt.annotate(r'$B_s=1/T_{\mathrm{chip}}$', xy=(f0, -3), xytext=(.1, -6),
                 arrowprops=dict(arrowstyle='-', color='k'))
# Turn off the axes
ax = plt.gca()
ax.axis('off')
# Save figure
if prefix is not None:
plt.savefig(prefix + 'fig1b.svg')
plt.savefig(prefix + 'fig1b.png')
return fig1b
def make_figure_2a(prefix=None, rng=None):
"""
Figure 2a - Chip Rate
Ported from MATLAB Code
<NAME>
24 March 2021
:param prefix: output directory to place generated figure
:param rng: random number generator
:return: figure handle
"""
if rng is None:
rng = np.random.default_rng()
# Generate the digital signals
rate_data = 4 # bits/sec
rate_chip = 16 # bits/sec
num_code_bits = int(np.fix(rate_chip/rate_data))
num_data_bits = 4
num_full_bits = num_code_bits*num_data_bits
data_bits = rng.integers(low=0, high=2, size=(1, num_data_bits))
code_bits = rng.integers(low=0, high=2, size=(num_code_bits, 1))
code_bits_full, data_bits_full = np.meshgrid(code_bits, data_bits)
out_bits_full = np.logical_xor(data_bits_full, code_bits_full)
out_bits_full = out_bits_full.astype(int)
# Convert from bits to symbols
data_symbols = np.reshape(np.exp(1j*np.pi*data_bits_full), newshape=(num_full_bits, 1))
code_symbols = np.reshape(np.exp(1j*np.pi*code_bits_full), newshape=(num_full_bits, 1))
out_symbols = np.reshape(np.exp(1j*np.pi*out_bits_full), newshape=(num_full_bits, 1))
# Generate the signals
osf = 16 # Samples per cycle
y = np.expand_dims(np.exp(1j*(np.pi/2+2*np.pi*np.arange(osf)/osf)), axis=0)
# Construct the code signals
y_data = np.ravel(y*data_symbols)
y_code = np.ravel(y*code_symbols)
y_dsss = np.ravel(y*out_symbols)
fig2a = plt.figure()
# Start with the Signals at the origin
plt.plot(np.arange(num_full_bits*osf), np.real(y_data)+6, label='Data Signal')
plt.plot(np.arange(num_code_bits*osf), np.real(y_code[0:num_code_bits*osf])+3, label='Spreading Code')
plt.plot(np.arange(num_full_bits*osf), np.real(y_dsss), label='Encoded Signal')
# Add the code and vertical lines
for idx, bit in enumerate(np.ravel(out_bits_full)):
plt.text(osf*idx+osf/2, 1.5, '{}'.format(bit))
plt.plot(osf*idx*np.array([1, 1]), [-1, 2], color='w', linestyle='-', linewidth=.5, label=None)
for idx, bit in enumerate(np.ravel(code_bits)):
plt.text(osf*idx+osf/2, 4.5, '{}'.format(bit))
plt.plot(osf*idx*np.array([1, 1]), [2, 5], color='w', linestyle='-', linewidth=.5, label=None)
for idx, bit in enumerate(np.ravel(data_bits)):
plt.text(osf*num_code_bits*idx+osf*num_code_bits/2, 7.5, '{}'.format(bit))
plt.plot(osf*num_code_bits*idx*np.array([1, 1]), [2, 8], color='w', linestyle='-', linewidth=.5, label=None)
    plt.grid(False)
ax = plt.gca()
ax.axis('off')
plt.legend(loc='right')
# Save figure
if prefix is not None:
plt.savefig(prefix + 'fig2a.svg')
plt.savefig(prefix + 'fig2a.png')
return fig2a
def make_figure_2b(prefix=None, rng=None):
"""
Figure 2b - Spectrum
Ported from MATLAB Code
<NAME>
24 March 2021
:param prefix: output directory to place generated figure
:param rng: random number generator
:return: figure handle
"""
if rng is None:
rng = | np.random.default_rng(0) | numpy.random.default_rng |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 09 17:41:27 2017
@author: Grant
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 25 20:33:29 2017
@author: Grant
An implementation of the Successive Over-Relaxation algorithm in Python, using
just-in-time compiling from numba for additional speed
"""
import numpy as np
from numba import njit, f8, u8
def solve(v0, b, g, omega, tol, theta, lamb):
'''Wrapper function to use SOR algorithm to solve Ax = b
Parameters
===========
    v0: numpy array
        First guess for solution
    b: numpy array
        vector to represent A * w
    g: numpy array
        vector representing early exercise values
    omega: float
        relaxation factor
    tol: float
        error tolerance for stopping iteration (error for convergence)
theta: float
parameter controlling what discretization method is being used
lamb: float
lambda parameter from option pricing model
'''
# Format dtype of all parameters to numba double precision float
# this helps @njit work correctly
v0 = f8(v0)
b = f8(b)
g = f8(g)
omega = f8(omega)
M = u8(10 ** 6)
tol = f8(tol)
theta = f8(theta)
lamb = f8(lamb)
# call actual SOR algorithm (need the numba dtypes to allow njit compile)
x = solve_body(v0, b, g, omega, tol, theta, lamb, M)
return x
@njit
def solve_body(v0, b, g, omega, tol, theta, lamb, M):
    '''Projected SOR function. Iterates until the error is less than the specified
    tolerance, or for M iterations, whichever comes first.
'''
# set initial guess
err = f8(tol + 1000000) # initial error level
v = v0
# While loop. Main loop exit after M iterations, but has a secondary break
# that stops when the observed error is less than selected tolerance
for k in range(0,M):
v_new = SOR_iter(b, v, g, omega, tol, theta, lamb) # perform SOR iteration
err = resid(v_new, v) # compute new residual
v = v_new
# second break parameter (under error tolerance)
if err < tol:
break
return v
@njit
def SOR_iter(b, v, g, omega, tol, theta, lamb):
''' Helper function to perform SOR iteration step.
'''
N = len(b)
    v_new = np.zeros_like(v)  # match the shape of v so any boundary entries survive between sweeps
    # first step of SOR algorithm (haven't yet computed any of v_new)
    v_new[0] = 0
    for i in range(1,N):
        # Compute new v[i] using the projected SOR update
v_new[i] = np.maximum(g[i+1],
v[i] + omega / (1 + 2 * theta * lamb) *
(b[i] + theta * lamb * v_new[i-1] -
(1 + 2 * theta * lamb) * v[i] + theta * lamb * v[i+1]))
return v_new
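# For orientation, a self-contained sketch of how the projected update in
# SOR_iter is meant to be driven end to end. Everything below is illustrative:
# the grid, the payoff g, and the right-hand side b are assumptions for the
# demo, not values taken from whatever pricing code calls solve().
def demo_projected_sor(N=8, theta=0.5, lamb=0.4, omega=1.2, tol=1e-10, max_iter=10000):
    '''Toy projected-SOR run on a put-style payoff with N interior nodes.'''
    s = np.linspace(0.0, 2.0, N + 2)
    g = np.maximum(1.0 - s, 0.0)  # exercise floor on N + 2 nodes, covering the g[i+1] lookups
    v = g[1:].copy()              # interior values plus the right boundary
    b = v[:-1].copy()             # stand-in right-hand side of length N
    for _ in range(max_iter):
        v_new = np.zeros_like(v)
        for i in range(1, N):
            cand = v[i] + omega / (1 + 2 * theta * lamb) * (
                b[i] + theta * lamb * v_new[i - 1]
                - (1 + 2 * theta * lamb) * v[i] + theta * lamb * v[i + 1])
            v_new[i] = max(g[i + 1], cand)  # project onto the early-exercise constraint
        if np.linalg.norm(v_new - v) < tol:
            break
        v = v_new
    return v_new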
@njit
def resid(x_new, x):
    ''' Calculate the L2 norm of the difference between x_new and x'''
# need to be numba double precision float so njit will work in body of
# solve function
err = f8( | np.linalg.norm(x_new - x) | numpy.linalg.norm |
import os
from gym import spaces
import numpy as np
import pybullet as p
from .env import AssistiveEnv
class BedBathingEnv(AssistiveEnv):
def __init__(self, robot_type='pr2', human_control=False):
super(BedBathingEnv, self).__init__(robot_type=robot_type, task='bed_bathing', human_control=human_control, frame_skip=5, time_step=0.02, action_robot_len=7, action_human_len=(10 if human_control else 0), obs_robot_len=24, obs_human_len=(28 if human_control else 0))
def step(self, action):
self.take_step(action, robot_arm='left', gains=self.config('robot_gains'), forces=self.config('robot_forces'), human_gains=0.05)
total_force, tool_force, tool_force_on_human, total_force_on_human, new_contact_points = self.get_total_force()
end_effector_velocity = np.linalg.norm(p.getLinkState(self.tool, 1, computeForwardKinematics=True, computeLinkVelocity=True, physicsClientId=self.id)[6])
obs = self._get_obs([tool_force], [total_force_on_human, tool_force_on_human])
# Get human preferences
preferences_score = self.human_preferences(end_effector_velocity=end_effector_velocity, total_force_on_human=total_force_on_human, tool_force_at_target=tool_force_on_human)
reward_distance = -min([c[8] for c in p.getClosestPoints(self.tool, self.human, distance=4.0, physicsClientId=self.id)])
reward_action = -np.sum(np.square(action)) # Penalize actions
reward_new_contact_points = new_contact_points # Reward new contact points on a person
reward = self.config('distance_weight')*reward_distance + self.config('action_weight')*reward_action + self.config('wiping_reward_weight')*reward_new_contact_points + preferences_score
if self.gui and tool_force_on_human > 0:
print('Task success:', self.task_success, 'Force at tool on human:', tool_force_on_human, reward_new_contact_points)
info = {'total_force_on_human': total_force_on_human, 'task_success': int(self.task_success >= (self.total_target_count*self.config('task_success_threshold'))), 'action_robot_len': self.action_robot_len, 'action_human_len': self.action_human_len, 'obs_robot_len': self.obs_robot_len, 'obs_human_len': self.obs_human_len}
done = False
return obs, reward, done, info
def get_total_force(self):
total_force = 0
tool_force = 0
tool_force_on_human = 0
total_force_on_human = 0
new_contact_points = 0
for c in p.getContactPoints(bodyA=self.tool, physicsClientId=self.id):
total_force += c[9]
tool_force += c[9]
for c in p.getContactPoints(bodyA=self.robot, physicsClientId=self.id):
bodyB = c[2]
if bodyB != self.tool:
total_force += c[9]
for c in p.getContactPoints(bodyA=self.robot, bodyB=self.human, physicsClientId=self.id):
total_force_on_human += c[9]
for c in p.getContactPoints(bodyA=self.tool, bodyB=self.human, physicsClientId=self.id):
linkA = c[3]
linkB = c[4]
contact_position = np.array(c[6])
total_force_on_human += c[9]
if linkA in [1]:
tool_force_on_human += c[9]
# Contact with human upperarm, forearm, hand
if linkB < 0 or linkB > p.getNumJoints(self.human, physicsClientId=self.id):
continue
indices_to_delete = []
for i, (target_pos_world, target) in enumerate(zip(self.targets_pos_upperarm_world, self.targets_upperarm)):
if np.linalg.norm(contact_position - target_pos_world) < 0.025:
# The robot made contact with a point on the person's arm
new_contact_points += 1
self.task_success += 1
p.resetBasePositionAndOrientation(target, [1000, 1000, 1000], [0, 0, 0, 1], physicsClientId=self.id)
indices_to_delete.append(i)
self.targets_pos_on_upperarm = [t for i, t in enumerate(self.targets_pos_on_upperarm) if i not in indices_to_delete]
self.targets_upperarm = [t for i, t in enumerate(self.targets_upperarm) if i not in indices_to_delete]
self.targets_pos_upperarm_world = [t for i, t in enumerate(self.targets_pos_upperarm_world) if i not in indices_to_delete]
indices_to_delete = []
for i, (target_pos_world, target) in enumerate(zip(self.targets_pos_forearm_world, self.targets_forearm)):
if np.linalg.norm(contact_position - target_pos_world) < 0.025:
# The robot made contact with a point on the person's arm
new_contact_points += 1
self.task_success += 1
p.resetBasePositionAndOrientation(target, [1000, 1000, 1000], [0, 0, 0, 1], physicsClientId=self.id)
indices_to_delete.append(i)
self.targets_pos_on_forearm = [t for i, t in enumerate(self.targets_pos_on_forearm) if i not in indices_to_delete]
self.targets_forearm = [t for i, t in enumerate(self.targets_forearm) if i not in indices_to_delete]
self.targets_pos_forearm_world = [t for i, t in enumerate(self.targets_pos_forearm_world) if i not in indices_to_delete]
return total_force, tool_force, tool_force_on_human, total_force_on_human, new_contact_points
def _get_obs(self, forces, forces_human):
torso_pos = np.array(p.getLinkState(self.robot, 15 if self.robot_type == 'pr2' else 0, computeForwardKinematics=True, physicsClientId=self.id)[0])
state = p.getLinkState(self.tool, 1, computeForwardKinematics=True, physicsClientId=self.id)
tool_pos = np.array(state[0])
tool_orient = np.array(state[1]) # Quaternions
robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_left_arm_joint_indices, physicsClientId=self.id)
robot_joint_positions = np.array([x[0] for x in robot_joint_states])
robot_pos, robot_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
if self.human_control:
human_pos = np.array(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id)[0])
human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
human_joint_positions = np.array([x[0] for x in human_joint_states])
# Human shoulder, elbow, and wrist joint locations
shoulder_pos, shoulder_orient = p.getLinkState(self.human, 5, computeForwardKinematics=True, physicsClientId=self.id)[:2]
elbow_pos, elbow_orient = p.getLinkState(self.human, 7, computeForwardKinematics=True, physicsClientId=self.id)[:2]
wrist_pos, wrist_orient = p.getLinkState(self.human, 9, computeForwardKinematics=True, physicsClientId=self.id)[:2]
robot_obs = np.concatenate([tool_pos-torso_pos, tool_orient, robot_joint_positions, shoulder_pos-torso_pos, elbow_pos-torso_pos, wrist_pos-torso_pos, forces]).ravel()
if self.human_control:
human_obs = np.concatenate([tool_pos-human_pos, tool_orient, human_joint_positions, shoulder_pos-human_pos, elbow_pos-human_pos, wrist_pos-human_pos, forces_human]).ravel()
else:
human_obs = []
return np.concatenate([robot_obs, human_obs]).ravel()
def reset(self):
self.setup_timing()
self.task_success = 0
self.contact_points_on_arm = {}
self.human, self.bed, self.robot, self.robot_lower_limits, self.robot_upper_limits, self.human_lower_limits, self.human_upper_limits, self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices, self.gender = self.world_creation.create_new_world(furniture_type='bed', static_human_base=False, human_impairment='random', print_joints=False, gender='random')
self.robot_lower_limits = self.robot_lower_limits[self.robot_left_arm_joint_indices]
self.robot_upper_limits = self.robot_upper_limits[self.robot_left_arm_joint_indices]
self.reset_robot_joints()
friction = 5
p.changeDynamics(self.bed, -1, lateralFriction=friction, spinningFriction=friction, rollingFriction=friction, physicsClientId=self.id)
# Setup human in the air and let them settle into a resting pose on the bed
joints_positions = [(3, np.deg2rad(30))]
controllable_joints = []
self.world_creation.setup_human_joints(self.human, joints_positions, controllable_joints, use_static_joints=False, human_reactive_force=None)
p.resetBasePositionAndOrientation(self.human, [-0.15, 0.2, 0.95], p.getQuaternionFromEuler([-np.pi/2.0, 0, 0], physicsClientId=self.id), physicsClientId=self.id)
p.setGravity(0, 0, -1, physicsClientId=self.id)
# Add small variation in human joint positions
for j in range(p.getNumJoints(self.human, physicsClientId=self.id)):
if p.getJointInfo(self.human, j, physicsClientId=self.id)[2] != p.JOINT_FIXED:
p.resetJointState(self.human, jointIndex=j, targetValue=self.np_random.uniform(-0.1, 0.1), targetVelocity=0, physicsClientId=self.id)
# Let the person settle on the bed
for _ in range(100):
p.stepSimulation(physicsClientId=self.id)
# Lock human joints and set velocities to 0
joints_positions = []
self.human_controllable_joint_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] if self.human_control else []
self.world_creation.setup_human_joints(self.human, joints_positions, self.human_controllable_joint_indices, use_static_joints=True, human_reactive_force=None, human_reactive_gain=0.01)
self.target_human_joint_positions = []
if self.human_control:
human_joint_states = p.getJointStates(self.human, jointIndices=self.human_controllable_joint_indices, physicsClientId=self.id)
self.target_human_joint_positions = np.array([x[0] for x in human_joint_states])
self.human_lower_limits = self.human_lower_limits[self.human_controllable_joint_indices]
self.human_upper_limits = self.human_upper_limits[self.human_controllable_joint_indices]
p.changeDynamics(self.human, -1, mass=0, physicsClientId=self.id)
p.resetBaseVelocity(self.human, linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0], physicsClientId=self.id)
p.setGravity(0, 0, 0, physicsClientId=self.id)
p.setGravity(0, 0, -1, body=self.human, physicsClientId=self.id)
# Find the base position and joint positions for a static person in bed
# print(p.getBasePositionAndOrientation(self.human, physicsClientId=self.id))
# joint_states = p.getJointStates(self.human, jointIndices=list(range(p.getNumJoints(self.human, physicsClientId=self.id))), physicsClientId=self.id)
# joint_positions = np.array([x[0] for x in joint_states])
# joint_string = '['
# for i, jp in enumerate(joint_positions):
# joint_string += '(%d, %.4f), ' % (i, jp)
# print(joint_string + ']')
# exit()
shoulder_pos, shoulder_orient = p.getLinkState(self.human, 5, computeForwardKinematics=True, physicsClientId=self.id)[:2]
elbow_pos, elbow_orient = p.getLinkState(self.human, 7, computeForwardKinematics=True, physicsClientId=self.id)[:2]
wrist_pos, wrist_orient = p.getLinkState(self.human, 9, computeForwardKinematics=True, physicsClientId=self.id)[:2]
target_pos = np.array([-0.6, 0.2, 1]) + self.np_random.uniform(-0.05, 0.05, size=3)
if self.robot_type == 'pr2':
target_orient = np.array(p.getQuaternionFromEuler(np.array([0, 0, 0]), physicsClientId=self.id))
self.position_robot_toc(self.robot, 76, [(target_pos, target_orient)], [(shoulder_pos, None), (elbow_pos, None), (wrist_pos, None)], self.robot_left_arm_joint_indices, self.robot_lower_limits, self.robot_upper_limits, ik_indices=range(29, 29+7), pos_offset=np.array([-0.1, 0, 0]), max_ik_iterations=200, step_sim=True, check_env_collisions=False, human_joint_indices=self.human_controllable_joint_indices, human_joint_positions=self.target_human_joint_positions)
self.world_creation.set_gripper_open_position(self.robot, position=0.2, left=True, set_instantly=True)
self.tool = self.world_creation.init_tool(self.robot, mesh_scale=[1]*3, pos_offset=[0, 0, 0], orient_offset=p.getQuaternionFromEuler([0, 0, 0], physicsClientId=self.id), maximal=False)
elif self.robot_type == 'jaco':
target_orient = p.getQuaternionFromEuler(np.array([0, np.pi/2.0, 0]), physicsClientId=self.id)
base_position, base_orientation, _ = self.position_robot_toc(self.robot, 8, [(target_pos, target_orient)], [(shoulder_pos, None), (elbow_pos, None), (wrist_pos, None)], self.robot_left_arm_joint_indices, self.robot_lower_limits, self.robot_upper_limits, ik_indices=[0, 1, 2, 3, 4, 5, 6], pos_offset=np.array([-0.05, 1.05, 0.6]), max_ik_iterations=200, step_sim=True, random_position=0.1, check_env_collisions=False, human_joint_indices=self.human_controllable_joint_indices, human_joint_positions=self.target_human_joint_positions)
self.world_creation.set_gripper_open_position(self.robot, position=1.1, left=True, set_instantly=True)
self.tool = self.world_creation.init_tool(self.robot, mesh_scale=[1]*3, pos_offset=[-0.01, 0, 0.03], orient_offset=p.getQuaternionFromEuler([0, -np.pi/2.0, 0], physicsClientId=self.id), maximal=False)
# Load a nightstand in the environment for the jaco arm
self.nightstand_scale = 0.275
visual_filename = os.path.join(self.world_creation.directory, 'nightstand', 'nightstand.obj')
collision_filename = os.path.join(self.world_creation.directory, 'nightstand', 'nightstand.obj')
nightstand_visual = p.createVisualShape(shapeType=p.GEOM_MESH, fileName=visual_filename, meshScale=[self.nightstand_scale]*3, rgbaColor=[0.5, 0.5, 0.5, 1.0], physicsClientId=self.id)
nightstand_collision = p.createCollisionShape(shapeType=p.GEOM_MESH, fileName=collision_filename, meshScale=[self.nightstand_scale]*3, physicsClientId=self.id)
nightstand_pos = np.array([-0.9, 0.7, 0]) + base_position
nightstand_orient = p.getQuaternionFromEuler(np.array([np.pi/2.0, 0, 0]), physicsClientId=self.id)
self.nightstand = p.createMultiBody(baseMass=0, baseCollisionShapeIndex=nightstand_collision, baseVisualShapeIndex=nightstand_visual, basePosition=nightstand_pos, baseOrientation=nightstand_orient, baseInertialFramePosition=[0, 0, 0], useMaximalCoordinates=False, physicsClientId=self.id)
else:
target_orient = p.getQuaternionFromEuler( | np.array([0, np.pi/2.0, 0]) | numpy.array |
# Script used to label the dataset files (data/data*.csv), call using the -h
# option for information.
# The program extracts preliminary features from the data, then sorts the
# results by feature importance and plots them in batches. Segments can be
# labeled by clicking on subplots.
import sys
import numpy as np
import pandas as pd
import argparse
from datetime import datetime
import glob
sys.path.append('lib')
import detect_peaks
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.patches import Rectangle
from peakutils.peak import indexes
from classes.Signal import Signal
from classes.DataSource import DataSource
# Parse arguments
data_file_id_choices = list(range(len(glob.glob("data/data*.csv"))))
parser = argparse.ArgumentParser(description='Label the dataset for training. \
A call should supply a data file index (e.g. 0 for ./data/data0.csv) and the label \
type (+/-). Figures of signal segment plots will be displayed, ordered by features that \
correlate with high signal to noise ratio. Labels for each signal segment \
are generated by clicking on the respective plot. The file ID, start, and end \
indices of the segment will be appended as a single new line in \
positive_ranges.csv or negative_ranges.csv, depending on the supplied label type.')
parser.add_argument('--file_id', type=int, default=20, required=True,
choices=data_file_id_choices,
help='data file index (e.g. 0 for ./data/data0.csv)')
parser.add_argument('--label_type', type=str, default="+", required=True,
choices=["+","-","positive","negative"],
help='e.g. +/-/positive/negative')
args = parser.parse_args()
FILE_ID = args.file_id
LABEL_TYPE = args.label_type
LABEL_TYPE = LABEL_TYPE.replace("+", "positive").replace("-", "negative")
# Helper functions
def onclick(event):
fx, fy = fig.transFigure.inverted().transform((event.x,event.y))
for i, subplot in enumerate(subplots):
if subplot["pos"].contains(fx,fy) and subplot["used"] == False:
range_ids = pd.DataFrame([subplot["range"]])
range_ids.to_csv('data/%s_ranges.csv' % LABEL_TYPE,
mode='a', header=False, index=False)
subplots[i]["used"] = True
fig.text(np.mean([subplot["pos"].x1,subplot["pos"].x0])-0.01,
| np.mean([subplot["pos"].y1,subplot["pos"].y0]) | numpy.mean |
""" Randomly sample a DCG with a maximum degree and cycle length """
import numpy as np
def erdos_random_graph(n_vars, max_degree, max_cycle):
""" Randomly sample a DCG with a maximum degree and cycle length """
support = np.zeros((n_vars, n_vars))
def degree(var):
return len(np.where(support[:, var])[0])+len( | np.where(support[var, :]) | numpy.where |
import glob
import random
import os
import numpy as np
import torch
from scipy import io
from collections import Counter
from torch.utils.data import Dataset
from PIL import Image
import torchvision.transforms as transforms
from torchvision import datasets
from skimage.transform import rescale
class CelebADataset(Dataset):
def __init__(self,
root,
ids=None,
transforms_=None,
mode='train',
attrs=[]):
self.transform = transforms.Compose(transforms_)
if ids is None:
self.files = sorted(glob.glob('%s/*.jpg' % root))
else:
ids_file = open(ids).read().split("\n")
self.files = ["%s/%s.jpg" % (root, id_) for id_ in ids_file]
self.files = self.files[:-2000] if mode == 'train' else self.files[-2000:]
self.label_path = "%s/list_attr_celeba.txt" % root
self.annotations = self.get_annotations(attrs)
self.keys1 = list(self.annotations.keys())
self.attrs = attrs
print("Attributes passed to CelebADataset:", attrs)
def get_annotations(self, attrs):
"""Extracts annotations for CelebA"""
annotations = {}
lines = [line.rstrip() for line in open(self.label_path, 'r')]
self.label_names = lines[1].split()
for _, line in enumerate(lines[2:]):
filename, *values = line.split()
labels = []
for attr in attrs:
idx = self.label_names.index(attr)
labels.append(1 * (values[idx] == '1'))
annotations[filename] = labels
return annotations
def __getitem__(self, index):
filepath = self.files[index % len(self.files)]
filename = filepath.split('/')[-1]
img = self.transform(Image.open(filepath))
label = self.annotations[filename]
label = torch.FloatTensor(np.array(label))
return img, label
def __len__(self):
return len(self.files)
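# Hypothetical usage of CelebADataset (the root path and attribute names below
# are assumptions for illustration, not paths shipped with this repo):
# tfs = [transforms.Resize((64, 64)), transforms.ToTensor()]
# ds = CelebADataset('data/celeba', transforms_=tfs, mode='train',
#                    attrs=['Smiling', 'Male'])
# img, label = ds[0]  # image tensor plus a FloatTensor of 0/1 attribute flags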
class ZapposDataset(Dataset):
def __init__(self,
root,
ids=None,
shuffle=False,
thresh=0.65,
transforms_=None,
mode='train'):
self.transform = transforms.Compose(transforms_)
files = []
if ids is not None:
with open(ids) as f:
for line in f:
line = line.rstrip().split(",")
if float(line[1]) <= thresh:
files.append("%s/%s" % (root, line[0]))
else:
files = glob.glob("%s/**" % root, recursive=True)
self.files = np.asarray(
[os.path.abspath(file_) for file_ in files if file_.endswith('.jpg')]
)
# Now figure out all the class names and make a dictionary
# mapping them to indices.
self.classes = []
marker = "ut-zap50k-images-square"
for filename in self.files:
self.classes.append( "-".join(filename[ filename.index(marker)+len(marker)+1 :: ].split("/")[0:2]) )
counter = Counter(self.classes)
class_names = sorted(counter.keys())
self.name2idx = {name:i for i,name in enumerate(class_names)}
self.classes = np.asarray(self.classes)
# Shuffle files and classes if necessary.
rnd_state = | np.random.RandomState(0) | numpy.random.RandomState |
import time
import pandas as pd
import numpy as np
from hyperopt import STATUS_OK, Trials, hp, space_eval, tpe, fmin
import lightgbm as lgb
from cleanlab.classification import LearningWithNoisyLabels
from cleanlab.latent_estimation import estimate_latent
from cleanlab.latent_estimation import estimate_confident_joint_and_cv_pred_proba
from sklearn.model_selection import train_test_split
import CONSTANT
nesting_level = 0
is_start = None
def timeit(method, start_log=None):
def timed(*args, **kw):
global is_start
global nesting_level
if not is_start:
print()
is_start = True
log(f"Start [{method.__name__}]:" + (start_log if start_log else ""))
nesting_level += 1
start_time = time.time()
result = method(*args, **kw)
end_time = time.time()
nesting_level -= 1
log(f"End [{method.__name__}]. Time elapsed: {end_time - start_time:0.2f} sec.")
is_start = False
return result
return timed
def log(entry):
global nesting_level
space = "-" * (4 * nesting_level)
print(f"{space}{entry}")
@timeit
def print_feature_importance(gbm, feature_name=None):
    if feature_name is None:
feature_name = gbm.feature_name()
feature_importance = list(gbm.feature_importance(importance_type='gain'))
zipped = zip(feature_name, feature_importance)
zipped_sorted = sorted(zipped, key=lambda x: x[1])
print("")
for name, score in zipped_sorted:
print(name, score)
print("")
def get_downsampling_num(npos, nneg, sample_num, unbalanced_ratio, min_neg_pos_ratio=2):
reverse = False
ntol = npos + nneg
    if npos > nneg:
        reverse = True
        npos, nneg = nneg, npos
max_sample_num = min(npos, nneg) * (unbalanced_ratio + 1)
if max_sample_num > sample_num:
max_sample_num = sample_num
if npos + nneg > max_sample_num:
if nneg / npos <= min_neg_pos_ratio:
pos_num = npos / ntol * max_sample_num
neg_num = nneg / ntol * max_sample_num
elif nneg / npos <= unbalanced_ratio:
if npos > max_sample_num / (min_neg_pos_ratio + 1):
pos_num = max_sample_num / (min_neg_pos_ratio + 1)
neg_num = max_sample_num - pos_num
else:
pos_num = npos
neg_num = max_sample_num - pos_num
elif nneg / npos > unbalanced_ratio:
if npos > max_sample_num / (unbalanced_ratio + 1):
pos_num = max_sample_num / (unbalanced_ratio + 1)
neg_num = max_sample_num - pos_num
else:
pos_num = npos
neg_num = max_sample_num - npos
else:
neg_num = nneg
pos_num = npos
if neg_num / pos_num > unbalanced_ratio:
neg_num = pos_num * unbalanced_ratio
neg_num = int(neg_num)
pos_num = int(pos_num)
if reverse:
return neg_num, pos_num
return pos_num, neg_num
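# A small, hypothetical walkthrough of the trimming logic above (the _demo_*
# helper and its numbers are illustrative additions, not part of the pipeline):
def _demo_downsampling_num():
    # With 1,000 positives and 200,000 negatives, the 10:1 imbalance cap binds
    # before the 50,000-sample budget, so the pool is trimmed to 1,000
    # positives and 10,000 negatives.
    pos_num, neg_num = get_downsampling_num(npos=1000, nneg=200000,
                                            sample_num=50000, unbalanced_ratio=10)
    assert (pos_num, neg_num) == (1000, 10000)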
def sample(X, frac, seed, y=None):
if frac == 1:
X = X.sample(frac=1, random_state=seed)
elif frac > 1:
mul = int(frac)
frac = frac - int(frac)
X_res = X.sample(frac=frac, random_state=seed)
X = pd.concat([X] * mul + [X_res])
else:
X = X.sample(frac=frac, random_state=seed)
if y is not None:
y = y.loc[X.index]
return X, y
return X
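# Hedged sketch of the frac > 1 branch (the demo helper is an illustrative addition):
def _demo_oversample():
    # frac=2.5 stacks two full copies plus a half-sized shuffled remainder,
    # so 4 rows become 10.
    df_toy = pd.DataFrame({'x': range(4)})
    assert len(sample(df_toy, 2.5, seed=0)) == 10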
def downsampling_num(y, max_sample_num):
npos = (y == 1).sum()
nneg = (y != 1).sum()
min_num = min(npos, nneg)
min_num = max(min_num, 1000)
if min_num < 8000:
unbalanced_ratio = 10 - (min_num // 1000)
else:
unbalanced_ratio = 3
pos_num, neg_num = get_downsampling_num(npos, nneg, max_sample_num, unbalanced_ratio)
return pos_num, neg_num
def class_sample(X, y, pos_num, neg_num, seed=2019):
npos = float((y == 1).sum())
nneg = len(y) - npos
pos_frac = pos_num / npos
neg_frac = neg_num / nneg
X_pos = X[y == 1]
X_pos = sample(X_pos, pos_frac, seed)
X_neg = X[y != 1]
X_neg = sample(X_neg, neg_frac, seed)
X = pd.concat([X_pos, X_neg])
X, y = sample(X, 1, seed, y)
return X, y
def downsampling(X, y, max_sample_num, seed=2019):
pos_num, neg_num = downsampling_num(y, max_sample_num)
return class_sample(X, y, pos_num, neg_num, seed)
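# End-to-end sketch on synthetic data (helper and numbers are illustrative):
def _demo_downsampling():
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame({'f0': rng.randn(5000)})
    y_demo = pd.Series((rng.rand(5000) < 0.02).astype(int))
    # A ~2% positive-rate problem is cut down to a bounded, better-balanced pool.
    X_small, y_small = downsampling(X_demo, y_demo, max_sample_num=2000)
    assert len(X_small) < len(X_demo) and y_small.mean() > y_demo.mean()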
def downcast(series, accuracy_loss=True, min_float_type='float16'):
if series.dtype == np.int64:
ii8 = np.iinfo(np.int8)
ii16 = np.iinfo(np.int16)
ii32 = np.iinfo(np.int32)
max_value = series.max()
min_value = series.min()
if max_value <= ii8.max and min_value >= ii8.min:
return series.astype(np.int8)
elif max_value <= ii16.max and min_value >= ii16.min:
return series.astype(np.int16)
elif max_value <= ii32.max and min_value >= ii32.min:
return series.astype(np.int32)
else:
return series
elif series.dtype == np.float64:
fi16 = np.finfo(np.float16)
fi32 = np.finfo(np.float32)
if accuracy_loss:
max_value = series.max()
min_value = series.min()
if np.isnan(max_value):
max_value = 0
if np.isnan(min_value):
min_value = 0
if min_float_type == 'float16' and max_value <= fi16.max and min_value >= fi16.min:
return series.astype(np.float16)
elif max_value <= fi32.max and min_value >= fi32.min:
return series.astype(np.float32)
else:
return series
else:
tmp = series[~pd.isna(series)]
if (len(tmp) == 0):
return series.astype(np.float16)
if (tmp == tmp.astype(np.float16)).sum() == len(tmp):
return series.astype(np.float16)
elif (tmp == tmp.astype(np.float32)).sum() == len(tmp):
return series.astype(np.float32)
else:
return series
else:
return series
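# Quick illustration of the downcasting rules (the demo helper is an addition):
def _demo_downcast():
    s_int = pd.Series([0, 1, 2], dtype=np.int64)
    s_flt = pd.Series([0.5, 1.5], dtype=np.float64)
    assert downcast(s_int).dtype == np.int8                           # fits in int8
    assert downcast(s_flt, accuracy_loss=False).dtype == np.float16   # exact in float16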
def gen_combine_cats(df, cols):
category = df[cols[0]].astype('float64')
for col in cols[1:]:
mx = df[col].max()
        category *= mx + 1  # multiply by (max + 1) so distinct column combinations map to distinct codes
category += df[col]
return category
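# Toy check of the combined-code construction (the demo helper is an addition):
def _demo_combine_cats():
    df_demo = pd.DataFrame({'a': [0, 1, 1], 'b': [2, 0, 1]})
    # With base (max + 1) = 3, the codes are a*3 + b -> [2, 3, 4], all distinct.
    assert gen_combine_cats(df_demo, ['a', 'b']).tolist() == [2.0, 3.0, 4.0]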
import math
from typing import Dict, List
def predict_lightgbm(X: pd.DataFrame, model, npart=100, num_iteration=None) -> List:
full_len = len(X)
split_len = math.floor(full_len / npart)
yhats = []
for i in range(npart):
if False:
print(i)
start = i * split_len
if i == (npart - 1):
end = full_len
else:
end = start + split_len
if num_iteration is None:
yhats.append(model.predict(X.iloc[start: end]))
else:
yhats.append(model.predict(X.iloc[start: end], num_iteration))
yhat = np.concatenate(yhats)
return yhat
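# Hypothetical usage (the trained model and test frame are assumed to exist
# elsewhere): splitting into npart slices bounds peak memory on wide test
# sets, at the cost of npart predict calls.
# yhat = predict_lightgbm(X_test[gbm.feature_name()], gbm, npart=100)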
@timeit
def predict_gbm(model, X, num_iteration=None):
# return model.predict(X[model.feature_name()])
return predict_lightgbm(X[model.feature_name()], model, num_iteration=num_iteration)
@timeit
def predict_sklearn(model, X):
return model.predict_proba(X)[:, 1]
def print_feature_importance(gbm):
feature_name = gbm.feature_name()
feature_importance = list(gbm.feature_importance(importance_type='gain'))
zipped = zip(feature_name, feature_importance)
zipped_sorted = sorted(zipped, key=lambda x: x[1])
# prefix = f"log/{config['debug']['dataname']}"
print("")
for name, score in zipped_sorted:
print(name, score)
print("")
@timeit
def data_sample_percent_by_col(X: pd.DataFrame, col, y: pd.Series = None, percent: float = 0.1, minimum = 10,
maximum: int = 10000, random_state = 1, ret_y = True):
ids = X[col].unique()
nids = math.ceil(len(ids)*percent)
if nids > maximum:
nids = maximum
elif nids < minimum:
nids = len(ids)
ids_sample = ids[:nids]
# ids_sample = ids.sample(nids, random_state=random_state)
log(f'Sampling -> nids: {nids}, len(X): {len(X)}')
X_sample = X.loc[X[col].isin(ids_sample)]
if ret_y:
y_sample = y[X_sample.index]
return X_sample, y_sample
else:
return X_sample
def get_log_lr(num_boost_round,max_lr,min_lr):
learning_rates = [max_lr+(min_lr-max_lr)/ | np.log(num_boost_round) | numpy.log |
import functools
import numpy
import numpy.testing
import pytest
import six.moves
import skimage.util
import tests.modules
import cellprofiler_core.image
import cellprofiler_core.measurement
import cellprofiler_core.module
import cellprofiler.modules.imagemath
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.workspace
cellprofiler_core.preferences.set_headless()
MEASUREMENT_NAME = "mymeasurement"
@pytest.fixture(scope="function")
def module():
return cellprofiler.modules.imagemath.ImageMath()
@pytest.fixture(scope="function")
def workspace(image_a, image_b, module):
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
workspace = cellprofiler_core.workspace.Workspace(
image_set=image_set,
image_set_list=image_set_list,
module=module,
pipeline=cellprofiler_core.pipeline.Pipeline(),
measurements=cellprofiler_core.measurement.Measurements(),
object_set=cellprofiler_core.object.ObjectSet(),
)
workspace.image_set.add("input_a", image_a)
workspace.image_set.add("input_b", image_b)
module.images[0].image_name.value = "input_a"
module.images[0].factor.value = 1.0
module.images[1].image_name.value = "input_b"
module.images[1].factor.value = 1.0
module.truncate_low.value = False
module.truncate_high.value = False
module.output_image_name.value = "output"
return workspace
def run_operation(operation, expected, module, workspace):
module.operation.value = operation
module.replace_nan.value = False
module.run(workspace)
output = workspace.image_set.get_image("output")
actual = output.pixel_data
numpy.testing.assert_array_equal(actual, expected)
class TestVolumes(object):
@staticmethod
@pytest.fixture(scope="function")
def image_a():
k, i, j = numpy.mgrid[-5:6, -5:6, -5:10]
data_a = numpy.zeros((11, 11, 15))
data_a[k ** 2 + i ** 2 + j ** 2 <= 25] = 1
image_a = cellprofiler_core.image.Image()
image_a.pixel_data = data_a
image_a.dimensions = 3
return image_a
@staticmethod
@pytest.fixture(scope="function")
def image_b():
k, i, j = numpy.mgrid[-5:6, -5:6, -10:5]
data_b = numpy.zeros((11, 11, 15))
data_b[k ** 2 + i ** 2 + j ** 2 <= 25] = 0.5
image_b = cellprofiler_core.image.Image()
image_b.pixel_data = data_b
image_b.dimensions = 3
return image_b
@staticmethod
def test_add(image_a, image_b, module, workspace):
operation = "Add"
expected = image_a.pixel_data + image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_subtract(image_a, image_b, module, workspace):
operation = "Subtract"
expected = image_a.pixel_data - image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_absolute_difference(image_a, image_b, module, workspace):
operation = "Absolute Difference"
expected = numpy.abs(image_a.pixel_data - image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_multiply(image_a, image_b, module, workspace):
operation = "Multiply"
expected = image_a.pixel_data * image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_divide(image_a, image_b, module, workspace):
operation = "Divide"
expected = image_a.pixel_data / image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_average(image_a, image_b, module, workspace):
operation = "Average"
expected = (image_a.pixel_data + image_b.pixel_data) / 2.0
run_operation(operation, expected, module, workspace)
@staticmethod
def test_minimum(image_a, image_b, module, workspace):
operation = "Minimum"
expected = numpy.minimum(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_maximum(image_a, image_b, module, workspace):
operation = "Maximum"
expected = numpy.maximum(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_invert(image_a, module, workspace):
operation = "Invert"
expected = skimage.util.invert(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_log_transform(image_a, module, workspace):
operation = "Log transform (base 2)"
expected = numpy.log2(image_a.pixel_data + 1)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_and(image_a, image_b, module, workspace):
operation = "And"
expected = 1.0 * numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_or(image_a, image_b, module, workspace):
operation = "Or"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_not(image_a, module, workspace):
operation = "Not"
expected = numpy.logical_not(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_equals(image_a, image_b, module, workspace):
operation = "Equals"
expected = image_a.pixel_data == image_b.pixel_data
run_operation(operation, expected, module, workspace)
class TestBinaryImages(object):
@staticmethod
@pytest.fixture()
def image_a():
data_a = numpy.random.rand(128, 128) > 0.5
image_a = cellprofiler_core.image.Image()
image_a.pixel_data = data_a
image_a.dimensions = 2
return image_a
@staticmethod
@pytest.fixture()
def image_b():
data_b = numpy.random.rand(128, 128) > 0.5
image_b = cellprofiler_core.image.Image()
image_b.pixel_data = data_b
image_b.dimensions = 2
return image_b
@staticmethod
def test_add(image_a, image_b, module, workspace):
operation = "Add"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_subtract(image_a, image_b, module, workspace):
operation = "Subtract"
expected = image_a.pixel_data.copy()
expected[image_b.pixel_data] = False
run_operation(operation, expected, module, workspace)
@staticmethod
def test_absolute_difference(image_a, image_b, module, workspace):
operation = "Absolute Difference"
expected = numpy.logical_xor(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_multiply(image_a, image_b, module, workspace):
operation = "Multiply"
expected = numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_divide(image_a, image_b, module, workspace):
operation = "Divide"
expected = image_a.pixel_data / image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_average(image_a, image_b, module, workspace):
operation = "Average"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_minimum(image_a, image_b, module, workspace):
operation = "Minimum"
expected = numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_maximum(image_a, image_b, module, workspace):
operation = "Maximum"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_invert(image_a, module, workspace):
operation = "Invert"
expected = numpy.logical_not(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_log_transform(image_a, module, workspace):
operation = "Log transform (base 2)"
expected = image_a.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_and(image_a, image_b, module, workspace):
operation = "And"
expected = numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_or(image_a, image_b, module, workspace):
operation = "Or"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_not(image_a, module, workspace):
operation = "Not"
expected = | numpy.logical_not(image_a.pixel_data) | numpy.logical_not |
# !/usr/bin/env python
# Copyright (c) 2016-2017, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import sys
import unittest
import wradlib.georef as georef
import wradlib.util as util
from wradlib.io import read_generic_hdf5, open_raster, gdal_create_dataset
import numpy as np
from osgeo import gdal, osr, ogr
from deprecation import fail_if_not_removed
np.set_printoptions(edgeitems=3, infstr='inf', linewidth=75, nanstr='nan',
precision=8, suppress=False, threshold=1000,
formatter=None)
class CoordinateTransformTest(unittest.TestCase):
def setUp(self):
self.r = np.array([0., 0., 111., 111., 111., 111.]) * 1000
self.az = np.array([0., 180., 0., 90., 180., 270.])
self.th = np.array([0., 0., 0., 0., 0., 0.5])
self.csite = (9.0, 48.0)
self.result_xyz = tuple(
(np.array([0., 0., 0., 110993.6738, 0., -110976.7856]),
np.array([0., -0., 110993.6738, 0., -110976.7856, -0.]),
np.array([0., 0., 725.7159843, 725.7159843, 725.7159843,
1694.22337134])))
self.result = tuple(
(np.array([9., 9., 9., 10.49189531, 9., 7.50810469]),
np.array([48., 48., 48.99839742, 47.99034027, 47.00160258,
47.99034027]),
np.array([0., 0., 967.03198482, 967.03198482, 967.03198482,
1935.45679527])))
self.result_n = tuple(
(np.array([9., 9., 9., 10.48716091, 9., 7.51306531]),
np.array([48., 48., 48.99814438, 47.99037251, 47.00168131,
47.99037544]),
np.array([0., 0., 725.7159843, 725.7159843, 725.7159843,
1694.22337134])))
@fail_if_not_removed
def test_hor2aeq(self):
self.assertTrue(np.allclose(georef.misc.hor2aeq(0.25, 0.5, 0.75),
(-0.29983281824238966,
0.22925926995789672)))
@fail_if_not_removed
def test_aeq2hor(self):
self.assertTrue(np.allclose(georef.misc.aeq2hor(0.22925926995789672,
-0.29983281824238966,
0.75),
(0.25, 0.5)))
@fail_if_not_removed
def test_polar2lonlat(self):
self.assertTrue(
np.allclose(georef.polar2lonlat(self.r, self.az, self.csite),
self.result[:2]))
@fail_if_not_removed
def test_polar2lonlatalt(self):
self.assertTrue(np.allclose(
georef.polar2lonlatalt(self.r, self.az, self.th, self.csite),
self.result, rtol=1e-03))
def test_spherical_to_xyz(self):
coords, rad = georef.spherical_to_xyz(self.r, self.az,
self.th, self.csite)
self.assertTrue(np.allclose(coords[..., 0], self.result_xyz[0],
rtol=1e-03))
self.assertTrue(np.allclose(coords[..., 1], self.result_xyz[1],
rtol=1e-03))
self.assertTrue(np.allclose(coords[..., 2], self.result_xyz[2],
rtol=1e-03))
def test_bin_altitude(self):
altitude = georef.bin_altitude(np.arange(10., 101., 10.)
* 1000., 2., 0, 6370040.)
altref = np.array([354.87448647, 721.50702113, 1099.8960815,
1490.04009656, 1891.93744678, 2305.58646416,
2730.98543223, 3168.13258613, 3617.02611263,
4077.66415017])
np.testing.assert_allclose(altref, altitude)
def test_bin_distance(self):
distance = georef.bin_distance(np.arange(10., 101., 10.) * 1000., 2.,
0, 6370040.)
distref = np.array([9993.49302358, 19986.13717891, 29977.90491409,
39968.76869178, 49958.70098959, 59947.6743006,
69935.66113377, 79922.63401441, 89908.5654846,
99893.4281037])
np.testing.assert_allclose(distref, distance)
def test_site_distance(self):
altitude = georef.bin_altitude(np.arange(10., 101., 10.) * 1000., 2.,
0, 6370040.)
distance = georef.site_distance(np.arange(10., 101., 10.) * 1000., 2.,
altitude, 6370040.)
distref = np.array([9993.49302358, 19986.13717891, 29977.90491409,
39968.76869178, 49958.70098959, 59947.6743006,
69935.66113377, 79922.63401441, 89908.5654846,
99893.4281037])
np.testing.assert_allclose(distref, distance)
def test_spherical_to_proj(self):
coords = georef.spherical_to_proj(self.r, self.az,
self.th, self.csite)
self.assertTrue(np.allclose(coords[..., 0], self.result_n[0]))
self.assertTrue(np.allclose(coords[..., 1], self.result_n[1]))
self.assertTrue(np.allclose(coords[..., 2], self.result_n[2]))
@fail_if_not_removed
def test_polar2lonlatalt_n(self):
lon, lat, alt = georef.polar2lonlatalt_n(self.r, self.az,
self.th, self.csite)
self.assertTrue(np.allclose(lon, self.result_n[0]))
self.assertTrue(np.allclose(lat, self.result_n[1]))
self.assertTrue( | np.allclose(alt, self.result_n[2]) | numpy.allclose |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 11:11:49 2017
@author: robertmarsland
"""
from __future__ import division
import numpy as np
import pandas as pd
from numpy.random import dirichlet
import numbers
#DEFAULT PARAMETERS FOR CONSUMER AND METABOLIC MATRICES, AND INITIAL STATE
a_default = {'sampling':'Binary', #{'Gaussian','Binary','Gamma'} specifies choice of sampling algorithm
             'SA': 60*np.ones(3), #Number of species in each specialist family (here, 3 families of 60 species)
             'MA': 30*np.ones(3), #Number of resources in each class
'Sgen': 30, #Number of generalist species (unbiased sampling over alll resource classes)
'muc': 10, #Mean sum of consumption rates (used in all models)
'sigc': 3, #Standard deviation of sum of consumption rates for Gaussian and Gamma models
'q': 0.0, #Preference strength of specialist families (0 for generalist and 1 for specialist)
'c0':0.0, #Sum of background consumption rates in binary model
'c1':1., #Specific consumption rate in binary model
'l':0.8, #Leakage fraction
'fs':0.45, #Fraction of secretion flux with same resource type
'fw':0.45, #Fraction of secretion flux to 'waste' resource
'sparsity':0.2, #Effective sparsity of metabolic matrix (between 0 and 1)
'n_wells':10, #Number of independent wells
'S':100, #Number of species per well (randomly sampled from the pool of size Stot = sum(SA) + Sgen)
'food':0, #index of food source (when a single resource is supplied externally)
'R0_food':1000, #unperturbed fixed point for supplied food
'regulation':'independent', #metabolic regulation (see dRdt)
'response':'type I', #functional response (see dRdt)
'supply':'off' #resource supply (see dRdt)
}
def MakeInitialState(assumptions):
"""
Construct stochastically colonized initial state, at unperturbed resource fixed point.
assumptions = dictionary of metaparameters
'SA' = number of species in each family
'MA' = number of resources of each type
'Sgen' = number of generalist species
'n_wells' = number of independent wells in the experiment
'S' = initial number of species per well
'food' = index of supplied "food" resource
'R0_food' = unperturbed fixed point for supplied food resource
Returns:
N0 = initial consumer populations
R0 = initial resource concentrations
"""
#PREPARE VARIABLES
#Force number of species to be an array:
if isinstance(assumptions['MA'],numbers.Number):
assumptions['MA'] = [assumptions['MA']]
if isinstance(assumptions['SA'],numbers.Number):
assumptions['SA'] = [assumptions['SA']]
#Force numbers of species to be integers:
assumptions['MA'] = np.asarray(assumptions['MA'],dtype=int)
assumptions['SA'] = np.asarray(assumptions['SA'],dtype=int)
assumptions['Sgen'] = int(assumptions['Sgen'])
#Extract total numbers of resources, consumers, resource types, and consumer families:
M = int(np.sum(assumptions['MA']))
T = len(assumptions['MA'])
S_tot = int(np.sum(assumptions['SA'])+assumptions['Sgen'])
F = len(assumptions['SA'])
#Construct lists of names of resources, consumers, resource types, consumer families and wells:
resource_names = ['R'+str(k) for k in range(M)]
type_names = ['T'+str(k) for k in range(T)]
family_names = ['F'+str(k) for k in range(F)]
consumer_names = ['S'+str(k) for k in range(S_tot)]
resource_index = [[type_names[m] for m in range(T) for k in range(assumptions['MA'][m])],
resource_names]
consumer_index = [[family_names[m] for m in range(F) for k in range(assumptions['SA'][m])]
+['GEN' for k in range(assumptions['Sgen'])],consumer_names]
well_names = ['W'+str(k) for k in range(assumptions['n_wells'])]
R0 = np.zeros((M,assumptions['n_wells']))
N0 = np.zeros((S_tot,assumptions['n_wells']))
if not isinstance(assumptions['food'],int):
assert len(assumptions['food']) == assumptions['n_wells'], 'Length of food vector must equal n_wells.'
food_list = assumptions['food']
else:
food_list = np.ones(assumptions['n_wells'],dtype=int)*assumptions['food']
if not (isinstance(assumptions['R0_food'],int) or isinstance(assumptions['R0_food'],float)):
        assert len(assumptions['R0_food']) == assumptions['n_wells'], 'Length of R0_food vector must equal n_wells.'
R0_food_list = assumptions['R0_food']
else:
R0_food_list = np.ones(assumptions['n_wells'],dtype=int)*assumptions['R0_food']
for k in range(assumptions['n_wells']):
N0[np.random.choice(S_tot,size=assumptions['S'],replace=False),k]=1.
R0[food_list[k],k] = R0_food_list[k]
N0 = pd.DataFrame(N0,index=consumer_index,columns=well_names)
R0 = pd.DataFrame(R0,index=resource_index,columns=well_names)
return N0, R0, M, T, S_tot, F
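# Minimal sketch using the defaults above (the _demo_* helper is an
# illustrative addition; it assumes a_default has not been modified):
def _demo_initial_state():
    N0, R0, M, T, S_tot, F = MakeInitialState(a_default)
    # (S_tot, n_wells) consumers and (M, n_wells) resources...
    assert N0.shape == (S_tot, a_default['n_wells'])
    # ...with exactly S randomly chosen colonists per well.
    assert all(N0.sum() == a_default['S'])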
def MakeMatrices(assumptions):
"""
Construct consumer matrix and metabolic matrix.
assumptions = dictionary of metaparameters
'sampling' = {'Gaussian','Binary','Gamma'} specifies choice of sampling algorithm
'SA' = number of species in each family
'MA' = number of resources of each type
'Sgen' = number of generalist species
'muc' = mean sum of consumption rates
'sigc' = standard deviation for Gaussian sampling of consumer matrix
'q' = family preference strength (from 0 to 1)
'c0' = row sum of background consumption rates for Binary sampling
'c1' = specific consumption rate for Binary sampling
'fs' = fraction of secretion flux into same resource type
'fw' = fraction of secretion flux into waste resource type
'sparsity' = effective sparsity of metabolic matrix (from 0 to 1)
        'waste_type' = index of resource type to designate as "waste"
Returns:
c = consumer matrix
D = metabolic matrix
"""
#PREPARE VARIABLES
#Force number of species to be an array:
if isinstance(assumptions['MA'],numbers.Number):
assumptions['MA'] = [assumptions['MA']]
if isinstance(assumptions['SA'],numbers.Number):
assumptions['SA'] = [assumptions['SA']]
#Force numbers of species to be integers:
assumptions['MA'] = np.asarray(assumptions['MA'],dtype=int)
assumptions['SA'] = np.asarray(assumptions['SA'],dtype=int)
assumptions['Sgen'] = int(assumptions['Sgen'])
#Default waste type is last type in list:
if 'waste_type' not in assumptions.keys():
assumptions['waste_type']=len(assumptions['MA'])-1
#Extract total numbers of resources, consumers, resource types, and consumer families:
M = np.sum(assumptions['MA'])
T = len(assumptions['MA'])
S = np.sum(assumptions['SA'])+assumptions['Sgen']
F = len(assumptions['SA'])
M_waste = assumptions['MA'][assumptions['waste_type']]
#Construct lists of names of resources, consumers, resource types, and consumer families:
resource_names = ['R'+str(k) for k in range(M)]
type_names = ['T'+str(k) for k in range(T)]
family_names = ['F'+str(k) for k in range(F)]
consumer_names = ['S'+str(k) for k in range(S)]
waste_name = type_names[assumptions['waste_type']]
resource_index = [[type_names[m] for m in range(T) for k in range(assumptions['MA'][m])],
resource_names]
consumer_index = [[family_names[m] for m in range(F) for k in range(assumptions['SA'][m])]
+['GEN' for k in range(assumptions['Sgen'])],consumer_names]
#PERFORM GAUSSIAN SAMPLING
if assumptions['sampling'] == 'Gaussian':
#Initialize dataframe:
c = pd.DataFrame(np.zeros((S,M)),columns=resource_index,index=consumer_index)
#Add Gaussian-sampled values, biasing consumption of each family towards its preferred resource:
for k in range(F):
for j in range(T):
if k==j:
c_mean = (assumptions['muc']/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_var = (assumptions['sigc']**2/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
else:
c_mean = (assumptions['muc']/M)*(1-assumptions['q'])
c_var = (assumptions['sigc']**2/M)*(1-assumptions['q'])
c.loc['F'+str(k)]['T'+str(j)] = c_mean + np.random.randn(assumptions['SA'][k],assumptions['MA'][j])*np.sqrt(c_var)
if 'GEN' in c.index:
c_mean = assumptions['muc']/M
c_var = assumptions['sigc']**2/M
c.loc['GEN'] = c_mean + np.random.randn(assumptions['Sgen'],M)*np.sqrt(c_var)
#PERFORM BINARY SAMPLING
elif assumptions['sampling'] == 'Binary':
assert assumptions['muc'] < M*assumptions['c1'], 'muc not attainable with given M and c1.'
#Construct uniform matrix at total background consumption rate c0:
c = pd.DataFrame(np.ones((S,M))*assumptions['c0']/M,columns=resource_index,index=consumer_index)
#Sample binary random matrix blocks for each pair of family/resource type:
for k in range(F):
for j in range(T):
if k==j:
p = (assumptions['muc']/(M*assumptions['c1']))*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
else:
p = (assumptions['muc']/(M*assumptions['c1']))*(1-assumptions['q'])
c.loc['F'+str(k)]['T'+str(j)] = (c.loc['F'+str(k)]['T'+str(j)].values
+ assumptions['c1']*BinaryRandomMatrix(assumptions['SA'][k],assumptions['MA'][j],p))
#Sample uniform binary random matrix for generalists:
if 'GEN' in c.index:
p = assumptions['muc']/(M*assumptions['c1'])
c.loc['GEN'] = c.loc['GEN'].values + assumptions['c1']*BinaryRandomMatrix(assumptions['Sgen'],M,p)
elif assumptions['sampling'] == 'Gamma':
#Initialize dataframe
c = pd.DataFrame(np.zeros((S,M)),columns=resource_index,index=consumer_index)
#Add Gamma-sampled values, biasing consumption of each family towards its preferred resource
for k in range(F):
for j in range(T):
if k==j:
c_mean = (assumptions['muc']/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_var = (assumptions['sigc']**2/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
thetac = c_var/c_mean
kc = c_mean**2/c_var
c.loc['F'+str(k)]['T'+str(j)] = np.random.gamma(kc,scale=thetac,size=(assumptions['SA'][k],assumptions['MA'][j]))
else:
c_mean = (assumptions['muc']/M)*(1-assumptions['q'])
c_var = (assumptions['sigc']**2/M)*(1-assumptions['q'])
thetac = c_var/c_mean
kc = c_mean**2/c_var
c.loc['F'+str(k)]['T'+str(j)] = np.random.gamma(kc,scale=thetac,size=(assumptions['SA'][k],assumptions['MA'][j]))
if 'GEN' in c.index:
c_mean = assumptions['muc']/M
c_var = assumptions['sigc']**2/M
thetac = c_var/c_mean
kc = c_mean**2/c_var
c.loc['GEN'] = np.random.gamma(kc,scale=thetac,size=(assumptions['Sgen'],M))
#PERFORM GAUSSIAN SAMPLING
elif assumptions['sampling'] == 'Binary_Gamma':
assert assumptions['muc'] < M*assumptions['c1'], 'muc not attainable with given M and c1.'
#Construct uniform matrix at total background consumption rate c0:
c = pd.DataFrame(np.ones((S,M))*assumptions['c0']/M,columns=resource_index,index=consumer_index)
#Sample binary random matrix blocks for each pair of family/resource type:
for k in range(F):
for j in range(T):
if k==j:
p = (assumptions['muc']/(M*assumptions['c1']))*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_mean = (assumptions['muc']/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_var = (assumptions['sigc']**2/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
else:
p = (assumptions['muc']/(M*assumptions['c1']))*(1-assumptions['q'])
c_mean = (assumptions['muc']/M)*(1-assumptions['q'])
c_var = (assumptions['sigc']**2/M)*(1-assumptions['q'])
c_mean_binary = assumptions['c0']+ assumptions['c1']*p
c_var_binary = assumptions['c1']**2 *p*(1-p)
c_mean_gamma = c_mean/c_mean_binary
c_var_gamma = (c_var - c_var_binary*(c_mean_gamma**2))/(c_var_binary + c_mean_binary**2)
thetac = c_var_gamma/c_mean_gamma
kc = c_mean_gamma**2/c_var_gamma
c.loc['F'+str(k)]['T'+str(j)] = (c.loc['F'+str(k)]['T'+str(j)].values + assumptions['c1']*BinaryRandomMatrix(assumptions['SA'][k],assumptions['MA'][j],p))*np.random.gamma(kc,scale=thetac,size=(assumptions['SA'][k],assumptions['MA'][j]))
#Sample uniform binary random matrix for generalists:
if 'GEN' in c.index:
p = assumptions['muc']/(M*assumptions['c1'])
c_mean = assumptions['muc']/M
c_var = assumptions['sigc']**2/M
c_mean_binary = assumptions['c0']+ assumptions['c1']*p
c_var_binary = assumptions['c1']**2 *p*(1-p)
c_mean_gamma = c_mean/c_mean_binary
c_var_gamma = (c_var - c_var_binary*(c_mean_gamma**2))/(c_var_binary + c_mean_binary**2)
thetac = c_var_gamma/c_mean_gamma
kc = c_mean_gamma**2/c_var_gamma
c.loc['GEN'] = (c.loc['GEN'].values + assumptions['c1']*BinaryRandomMatrix(assumptions['Sgen'],M,p))*np.random.gamma(kc,scale=thetac,size=(assumptions['Sgen'],M))
elif assumptions['sampling'] == 'Uniform':
#Initialize dataframe:
c = pd.DataFrame(np.zeros((S,M)),columns=resource_index,index=consumer_index)
#Add uniformly sampled values, biasing consumption of each family towards its preferred resource:
for k in range(F):
for j in range(T):
if k==j:
c_mean = (assumptions['muc']/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
else:
c_mean = (assumptions['muc']/M)*(1-assumptions['q'])
c.loc['F'+str(k)]['T'+str(j)] = c_mean + (np.random.rand(assumptions['SA'][k],assumptions['MA'][j])-0.5)*assumptions['b']
if 'GEN' in c.index:
c_mean = assumptions['muc']/M
c.loc['GEN'] = c_mean + (np.random.rand(assumptions['Sgen'],M)-0.5)*assumptions['b']
else:
print("Invalid distribution choice. Valid choices for assumptions['sampling'] are 'Gaussian', 'Binary', 'Gamma', 'Binary_Gamma', and 'Uniform'.")
return 'Error'
#SAMPLE METABOLIC MATRIX FROM DIRICHLET DISTRIBUTION
DT = pd.DataFrame(np.zeros((M,M)),index=c.keys(),columns=c.keys())
for type_name in type_names:
MA = len(DT.loc[type_name])
if type_name != waste_name:
#Set background secretion levels
p = pd.Series(np.ones(M)*(1-assumptions['fs']-assumptions['fw'])/(M-MA-M_waste),index = DT.keys())
#Set self-secretion level
p.loc[type_name] = assumptions['fs']/MA
#Set waste secretion level
p.loc[waste_name] = assumptions['fw']/M_waste
#Sample from dirichlet
DT.loc[type_name] = dirichlet(p/assumptions['sparsity'],size=MA)
else:
if M > MA:
#Set background secretion levels
p = pd.Series(np.ones(M)*(1-assumptions['fw']-assumptions['fs'])/(M-MA),index = DT.keys())
#Set self-secretion level
p.loc[type_name] = (assumptions['fw']+assumptions['fs'])/MA
else:
p = pd.Series(np.ones(M)/M,index = DT.keys())
#Sample from dirichlet
DT.loc[type_name] = dirichlet(p/assumptions['sparsity'],size=MA)
return c, DT.T
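# A minimal sanity-check sketch for the Dirichlet-sampled metabolic matrix above
# (illustrative only, assuming an `assumptions` dict with the keys consumed by
# MakeMatrices): every row of DT is a Dirichlet sample, so after transposing,
# each column of the returned D sums to 1.
#
# c, D = MakeMatrices(assumptions)
# assert np.allclose(D.sum(axis=0), 1.0)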
def MakeParams(assumptions):
"""
Makes a dictionary of parameters, using MakeMatrices for the matrices, MakeInitialState
for the resource supply point, and setting everything else to 1, except l which is zero.
Parameter values can be modified from 1 (or zero for l) by adding their name-value pairs
to the assumptions dictionary.
"""
c, D = MakeMatrices(assumptions)
N0,R0, M, T, S_tot, F = MakeInitialState(assumptions)
if not isinstance(assumptions['food'],int) or not isinstance(assumptions['R0_food'],int):
params=[{'c':c,
'm':1,
'w':1,
'D':D,
'g':1,
'l':0,
'R0':R0.values[:,k],
'tau':1,
'r':1,
'sigma_max':1,
'nreg':10,
'n':2
} for k in range(assumptions['n_wells'])]
for item in ['m','w','g','l','tau','r','sigma_max','n','nreg']:
if item in assumptions.keys():
for k in range(assumptions['n_wells']):
params[k][item] = assumptions[item]
else:
params={'c':c,
'm':1,
'w':1,
'D':D,
'g':1,
'l':0,
'R0':R0.values[:,0],
'tau':1,
'r':1,
'sigma_max':1,
'nreg':10,
'n':2
}
for item in ['m','w','g','l','tau','r','sigma_max','n','nreg']:
if item in assumptions.keys():
params[item] = assumptions[item]
return params
def MakeResourceDynamics(assumptions):
"""
Construct resource dynamics. 'assumptions' must be a dictionary containing at least
three entries:
response = {'type I', 'type II', 'type III'} specifies nonlinearity of growth law
regulation = {'independent','energy','mass'} allows microbes to adjust uptake
rates to favor the most abundant accessible resources (measured either by
energy or mass)
supply = {'off','external','self-renewing','predator'} sets choice of
intrinsic resource dynamics
Returns a function of N, R, and the model parameters, which itself returns the
vector of resource rates of change dR/dt
"""
sigma = {'type I': lambda R,params: params['c']*R,
'type II': lambda R,params: params['c']*R/(1+params['c']*R/params['sigma_max']),
'type III': lambda R,params: (params['c']*R)**params['n']/(1+(params['c']*R)**params['n']/params['sigma_max'])
}
u = {'independent': lambda x,params: 1.,
'energy': lambda x,params: (((params['w']*x)**params['nreg']).T
/np.sum((params['w']*x)**params['nreg'],axis=1)).T,
'mass': lambda x,params: ((x**params['nreg']).T/np.sum(x**params['nreg'],axis=1)).T
}
h = {'off': lambda R,params: 0.,
'external': lambda R,params: (params['R0']-R)/params['tau'],
'self-renewing': lambda R,params: params['r']*R*(params['R0']-R),
'predator': lambda R,params: params['r']*R*(params['R0']-R)-params['u']*R
}
J_in = lambda R,params: (u[assumptions['regulation']](params['c']*R,params)
*params['w']*sigma[assumptions['response']](R,params))
J_out = lambda R,params: (params['l']*J_in(R,params)).dot(params['D'].T)
return lambda N,R,params: (h[assumptions['supply']](R,params)
-(J_in(R,params)/params['w']).T.dot(N)
+(J_out(R,params)/params['w']).T.dot(N))
def MakeConsumerDynamics(assumptions):
"""
Construct consumer dynamics. 'assumptions' must be a dictionary containing at least
three entries:
response = {'type I', 'type II', 'type III'} specifies nonlinearity of growth law
regulation = {'independent','energy','mass'} allows microbes to adjust uptake
rates to favor the most abundant accessible resources (measured either by
energy or mass)
supply = {'off','external','self-renewing','predator'} sets choice of
intrinsic resource dynamics
Returns a function of N, R, and the model parameters, which itself returns the
vector of consumer rates of change dN/dt
"""
sigma = {'type I': lambda R,params: params['c']*R,
'type II': lambda R,params: params['c']*R/(1+params['c']*R/params['sigma_max']),
'type III': lambda R,params: (params['c']*R)**params['n']/(1+(params['c']*R)**params['n']/params['sigma_max'])
}
u = {'independent': lambda x,params: 1.,
'energy': lambda x,params: (((params['w']*x)**params['nreg']).T
/np.sum((params['w']*x)**params['nreg'],axis=1)).T,
'mass': lambda x,params: ((x**params['nreg']).T/np.sum(x**params['nreg'],axis=1)).T
}
J_in = lambda R,params: (u[assumptions['regulation']](params['c']*R,params)
*params['w']*sigma[assumptions['response']](R,params))
J_growth = lambda R,params: (1-params['l'])*J_in(R,params)
return lambda N,R,params: params['g']*N*(np.sum(J_growth(R,params),axis=1)-params['m'])
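# A minimal usage sketch (illustrative, assuming an `assumptions` dict with the
# keys documented above, plus an S_tot species count from MakeInitialState): the
# two factory functions return closures that can be stacked into a single ODE
# right-hand side for an off-the-shelf integrator.
#
# params = MakeParams(assumptions)
# dNdt = MakeConsumerDynamics(assumptions)  # f(N, R, params) -> dN/dt
# dRdt = MakeResourceDynamics(assumptions)  # f(N, R, params) -> dR/dt
# def rhs(t, y):
#     N, R = y[:S_tot], y[S_tot:]
#     return np.hstack([dNdt(N, R, params), dRdt(N, R, params)])
# # `rhs` can then be passed to, e.g., scipy.integrate.solve_ivp.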
def MixPairs(plate1, plate2, R0_mix = 'Com1'):
"""
Perform "community coalescence" by mixing pairs of communities.
plate1, plate2 = plates containing communities to be mixed
R0_mix = {'Com1', 'Com2', matrix of dimension Mxn_wells1xn_wells2} specifies
the resource profile to be supplied to the mixture
Returns:
plate_mixed = plate containing 50/50 mixtures of all pairs of communities
from plate1 and plate2.
N_1, N_2 = compositions of original communities
N_sum = initial compositions of mixed communities
"""
assert np.all(plate1.N.index == plate2.N.index), "Communities must have the same species names."
assert np.all(plate1.R.index == plate2.R.index), "Communities must have the same resource names."
import numpy as np
import warnings
#GLM
from pyglmnet import GLM
#NN
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Lambda
from keras.regularizers import l2
from keras.optimizers import Nadam, adam
from keras.layers.normalization import BatchNormalization
#CV
from sklearn.model_selection import KFold
#XGB
import xgboost as xgb
#RF
from sklearn.ensemble import RandomForestRegressor
#LSTM
from keras.layers import LSTM
class MLencoding(object):
"""
This class implements several conveniences for fitting and
and predicting a neuron's activity with encoding models built
from machine learning methods.
Parameters
----------
tunemodel: str, {'glm','feedforward_nn','xgboost','lstm','random_forest'}
OR
sklearn-style instance with methods 'fit' & 'predict'
random_state: int, seed for numpy.random
verbose: bool, whether to print convergence / loss, default: False
History terms (optional):
cov_history = whether to use covariate history in prediction
spike_history = whether to use spike history in prediction
max_time = how long back, in ms, to use covariate/spike history from (if using)
n_filters = number of temporal bases to use to span the interval [0, max_time]
window = how many ms are in each time bin?
n_every = predict all time bins (n_every = 1), or only every n_every-th bin?
Note that if time bins are randomly selected for test/train split,
use n_every >= max_time/window to prevent leakage
For ensemble fitting:
Fit a stacked ensemble by setting is_ensemble = True. This requires that you
supply a list of 1st-order methods.
is_ensemble = True/False. whether to train tunemodel on the results of 1st-order models
first_order_models = list of MLencoding instances. Needed if is_ensemble = True
Callable methods
----------------
set_params
fit
fit_cv
predict
get_params
Internal methods (also Callable)
-------------------------------
get_all_with_history_keras
poisson_pseudoR2
raised_cosine_filter
temporal_filter
"""
def __init__(self, tunemodel='glm', spike_history = False, cov_history = False,
random_state=1, window = 0, n_filters = 0, max_time = 0, n_every = 1,
verbose=0, is_ensemble = False, first_order_models = None):
"""
Initialize the object
"""
self.tunemodel = tunemodel
self.verbose = verbose
self.spike_history = spike_history
self.cov_history = cov_history
self.window = window
self.n_filters = n_filters
self.max_time = max_time
self.n_every = n_every
self.is_ensemble = is_ensemble
self.first_order_models = first_order_models
np.random.seed(random_state)
if isinstance(tunemodel,str):
valid_models = ['glm','feedforward_nn','xgboost','lstm','random_forest']
if tunemodel not in valid_models:
raise NotImplementedError('Invalid model type. Got {}, not in {}'.format(
tunemodel, valid_models))
# Assign optimization parameters
# -------------------------------
self.set_default_params()
# If tunemodel is not a str we assume it's a predefined sklearn-style model
else:
self.model = tunemodel
if spike_history or cov_history:
try:
assert all([p>0 for p in [window, n_filters, max_time]])
assert isinstance(n_filters, int)
except AssertionError:
print('window, n_filters, and max_time must all be ' +
'greater than 0 if spike or covariate history are used.')
raise
try:
assert int(max_time/window) >= n_filters
except AssertionError:
print('There are more time filters than there are time points '+
'per example. Need max_time//window >= n_filters')
raise
if tunemodel == 'lstm':
try:
assert spike_history and cov_history
except AssertionError:
print('Recurrent models need history!\n' +
'Set spike_history=True and cov_history=True')
raise
if is_ensemble:
try:
for model in first_order_models:
assert isinstance(model, MLencoding)
except:
print('first_order_models needs to be a list of MLencoding objects '+
'if is_ensemble == True.')
raise
def set_default_params(self):
"""
A function that sets model parameters to some default.
"""
tunemodel = self.tunemodel
# Assign 'default' parameters;
if tunemodel == 'glm':
self.params = {'distr':'softplus', 'alpha':0.1, 'tol':1e-8,
'reg_lambda':np.logspace(np.log(0.05), np.log(0.0001), 10, base=np.exp(1)),
'learning_rate':2, 'max_iter':10000, 'eta':2.0}
elif tunemodel == 'feedforward_nn':
self.params = {'dropout': 0.05,
'l2': 1.6e-08,
'lr': 0.001,
'n1': 76, #number of layers in 1st hidden layer
'n2': 16,
'decay': 0.009, 'clipnorm' : 1.3, 'b1' : 0.2, 'b2' : 0.02}
elif tunemodel == 'xgboost':
self.params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 1,
'learning_rate': 0.05,
'min_child_weight': 2, 'n_estimators': 580,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.4}
elif tunemodel == 'random_forest':
self.params = {'max_depth': 15,
'min_samples_leaf': 4,
'min_samples_split': 5,
'min_weight_fraction_leaf': 0.0,
'n_estimators': 471}
elif tunemodel == 'lstm':
self.params = {'epochs': 8, 'n_units': 45, 'dropout': 0.00491871366927632,
'batch_size': 101}
if isinstance(tunemodel,str):
self.default_params = True
def set_params(self,params):
"""Method for setting the parameters of the regression method."""
assert isinstance(params,dict)
for k in params.keys():
self.params[k] = params[k]
if not isinstance(self.tunemodel,str):
# can use this method to access sklearn's set_params method
# if method predefined and not a string
self.tunemodel.set_params(**params)
self.default_params = False
def get_params(self):
"""Prints the current parameters of the model."""
return self.params
def fit(self, X, Y, get_history_terms = True):
"""
Fits the model to the data in X to predict the response Y.
Imports models and creates model instance as well.
Parameters
----------
X: float, n_samples x n_features, features of interest
Y: float, n_samples x 1, population activity
get_history_terms = Boolean. Whether to compute the temporal features.
Note that if spike_history and cov_history are False,
no history will be computed anyways and the flag does nothing.
"""
if self.default_params:
warnings.warn('\n Using default hyperparameters. Consider optimizing on'+
' a held-out dataset using, e.g. hyperopt or random search')
# make the covariate matrix. Include spike or covariate history?
# The different methods here are to satisfy the needs of recurrent keras
# models
if get_history_terms:
if self.tunemodel == 'lstm':
X, Y = self.get_all_with_history_keras(X, Y)
else:
X, Y = self.get_all_with_history(X, Y)
if self.tunemodel == 'glm':
model = GLM(**self.params)
model.fit(X, Y)
# we want the last of the regularization path
self.model = model[-1]
elif self.tunemodel == 'feedforward_nn':
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
params = self.params
model = Sequential()
model.add(Dense(params['n1'], input_dim=np.shape(X)[1], kernel_initializer='glorot_normal',
activation='relu', kernel_regularizer=l2(params['l2'])))
model.add(Dropout(params['dropout']))
model.add(BatchNormalization())
model.add(Dense(params['n2'], kernel_initializer='glorot_normal'
, activation='relu',kernel_regularizer=l2(params['l2'])))
model.add(BatchNormalization())
model.add(Dense(1,activation='softplus'))
optim = adam(lr=params['lr'], clipnorm=params['clipnorm'],
decay = params['decay'],
beta_1=1-params['b1'], beta_2=1-params['b2'])
model.compile(loss='poisson', optimizer=optim,)
hist = model.fit(X, Y, batch_size = 128, epochs=30, verbose=self.verbose)
self.model = model
elif self.tunemodel == 'xgboost':
dtrain = xgb.DMatrix(X, label=Y)
num_round = 200
self.model = xgb.train(self.params, dtrain, num_round)
elif self.tunemodel == 'random_forest':
self.model = RandomForestRegressor(**self.params)
self.model.fit(X, Y)
elif self.tunemodel == 'lstm':
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
params = self.params
model=Sequential() #Declare model
#Add recurrent layer
model.add(LSTM(int(params['n_units']),input_shape=(X.shape[1],X.shape[2]),\
dropout=params['dropout'],recurrent_dropout=params['dropout'])) # Keras 2 arg names; the rest of this file already uses the Keras 2 API
#Within recurrent layer, include dropout
model.add(Dropout(params['dropout'])) #Dropout some units (recurrent layer output units)
#Add dense connections to output layer
model.add(Dense(1,activation='softplus'))
#Fit model (and set fitting parameters)
model.compile(loss='poisson',optimizer='rmsprop',metrics=['accuracy'])
model.fit(X,Y,epochs=int(params['epochs']),
batch_size = int(params['batch_size']),verbose=self.verbose) #Fit the model
self.model = model
else: #using predefined model
self.model.fit(X,Y)
def predict(self, X, get_history_terms = True):
"""
Compute the firing rates for the neuron
based on the fit of specified tuning model.
Parameters
----------
X: float, n_samples x n_features, feature of interest
get_history_terms = Boolean. Whether to compute the temporal features.
Note that if spike_history and cov_history are False,
no history will be computed anyways and the flag does nothing.
Outputs
-------
Y: float, (n_samples,) , predicted activity
"""
Y_null = np.zeros((X.shape[0],))
if get_history_terms:
if self.tunemodel == 'lstm':
X, Y_null = self.get_all_with_history_keras(X, Y_null)
else:
X, Y_null = self.get_all_with_history(X, Y_null)
if self.tunemodel == 'xgboost':
X = xgb.DMatrix(X)
Y = self.model.predict(X)
return Y.flatten()
def raised_cosine_filter(self,n_bins, k, nBases = 15):
"""Return a cosine bump, kth of nBases, such that the bases tile
the interval [0, n_bins].
To plot these bases:
for i in range(10):
b = raised_cosine_filter(250, i, nBases = 10)
plt.plot(b)
"""
assert all([isinstance(p,int) for p in [n_bins, k, nBases]])
t = np.linspace(0,self.max_time,n_bins)
nt = np.log(t+0.1)
cSt,cEnd = nt[1],nt[-1]
db = (cEnd - cSt) / (nBases)
c = np.arange(cSt,cEnd,db)
bas = np.zeros((nBases,t.shape[0]))
filt = lambda x: ( np.cos( \
np.maximum(-np.pi, np.minimum(np.pi,(nt - c[x])*np.pi/(db) )) ) \
+ 1) / 2;
this_filt = filt(k)
return this_filt/np.sum(this_filt)
def temporal_filter(self,variables,nfilt=10, keras_format = False, scale = None):
""" Performs convolution of various filters upon each variable (column) in the input array
Inputs:
variables = an array of shape (n_bins, n_variables)
nfilt = number of filters
keras_format = return a 2d or 3d array
scale = function for scaling, centering variables.
Outputs:
history_filters = an array of shape(n_bins, n_variables x n_filters)
OR
an array of shape(n_bins, n_filters, n_variables) if keras_format
^ these are different shapes because of the redundant 3D
format that Keras wants its variables for RNNs
"""
if scale is None:
scale = lambda x: x
if variables.ndim == 1:
variables = np.reshape(variables,(variables.shape[0],1))
# We'll use 10 bases up to 250 ms
window = self.window
n_bins = int(self.max_time/window)
n_vars = variables.shape[1]
history_filters = np.zeros((variables.shape[0],n_vars*nfilt))
if keras_format:
history_filters = np.zeros((variables.shape[0],nfilt,n_vars))
for i in range(nfilt):
#get raised cosine filter
filt = self.raised_cosine_filter(n_bins,i,nfilt)
#apply it to each variable
this_filter = np.zeros(variables.shape)
for j in range(n_vars):
temp = np.convolve(variables[:,j],filt)[:variables.shape[0]]
this_filter[:,j] = temp
if keras_format:
history_filters[:, i, :] = scale(this_filter)
else:
history_filters[:,(n_vars*i):(n_vars*(i+1))] = this_filter
return history_filters
def get_all_with_history(self,raw_covariates, raw_spikes,
cov_history = None,
nfilt = None, spike_history = None,
normalize = False,
n_every = None):
"""
Inputs:
raw_spikes = (nbins,) array of binned spikes
raw_covariates = (nbins,nvars) array of binned covariates
cov_history = whether to use covariate history in prediction
spike_history = whether to use spike history in prediction
normalize = whether to normalize the mean and variance of all covariates & their history
n_every = predict all time bins (n_every = 1), or only every n_every-th bin?
Note that if time bins are randomly selected for test/train split,
use n_every >= max_time/window to prevent leakage
nfilt = number of temporal features. Uses raised cosine bases up to 250 ms
Returns:
covariates = array with columns as covariates. Columns go:
[current cov.] + [cov. convolved with temporal filters, if chosen] +
[spike history convolved with filters]
spikes = an array of spike bins, to be used as training/test Y
"""
if cov_history is None:
cov_history = self.cov_history
if nfilt is None:
nfilt = self.n_filters
if spike_history is None:
spike_history = self.spike_history
if n_every is None:
n_every = self.n_every
assert raw_spikes.ndim == 1
data_indices = range(n_every-1,raw_spikes.shape[0],n_every)
spikes = raw_spikes[data_indices]
covariates = raw_covariates[data_indices,:]
# then we convolve spikes to get spike histories
# will be (n_bins, nfilt) array
if spike_history:
spike_histories = self.temporal_filter(raw_spikes,nfilt)
assert spike_histories.shape == (raw_spikes.shape[0],nfilt)
covariates = np.hstack((covariates,spike_histories[data_indices,:]))
# and we get covariate histories
if cov_history:
cov_histories = self.temporal_filter(raw_covariates,nfilt)
assert cov_histories.shape == (raw_spikes.shape[0],nfilt*raw_covariates.shape[1])
covariates = np.hstack((covariates,cov_histories[data_indices,:]))
if normalize:
from sklearn.preprocessing import scale
covariates = scale(covariates)
return covariates, spikes
def get_all_with_history_keras(self, raw_covariates,raw_spikes,
bins_before=0,temporal_bases = None,
spike_history= None,
covariate_history = None, normalize = True):
"""
Function that creates the covariate matrix for a Keras LSTM (or RNN, etc.)
Note: assumes continuity of data. Call on separate CV folds
Note: covariate_history must be true, otherwise LSTM doesn't really make sense
----------
raw_spikes: a matrix of shape (n_samples,)
the number of spikes in each time bin for the neuron
raw_covariates: a matrix of size "number of time bins" x "number of covariates"
the value of each covariate in each time bin
temporal_bases: None, or int
Whether to use raw bins or a convolved kernel with some number of features
If no temporal bases, would you like to use the raw bins before? -->
bins_before: integer
How many bins of neural data prior to the output are used
Ignored if temporal_bases is > 0
Returns
-------
X: a matrix of size "number of total time bins - number of temporal items"
x "number of temporal items" x "1+n_features"
"""
if temporal_bases is None:
temporal_bases = self.n_filters
if spike_history is None:
spike_history = self.spike_history
if covariate_history is None:
covariate_history = self.cov_history
assert raw_spikes.ndim == 1 and raw_covariates.shape[0]==raw_spikes.shape[0]
assert covariate_history #
num_examples=raw_spikes.shape[0] #Number of total time bins we have neural data for
n_features = raw_covariates.shape[1]
sh = ch = 0
if spike_history: sh = 1
if covariate_history: ch = 1
if normalize:
from sklearn.preprocessing import scale
raw_covariates = scale(raw_covariates)
else: scale = lambda x: x
if temporal_bases:
first_n = int(self.max_time/self.window) # early bins where we don't have all covariates
spikes = raw_spikes[:]
covariates = np.zeros((num_examples, 1+temporal_bases, sh+ch*n_features))
covariates[:,0,sh:] = raw_covariates # input current covariates
# then we convolve spikes to get spike histories
if spike_history:
spike_histories = self.temporal_filter(raw_spikes,temporal_bases,
scale=scale)
assert spike_histories.shape == (num_examples,temporal_bases)
covariates[:,1:,0] = spike_histories # spike history input will be 0 at 'curr' input
# and we get covariate histories
if covariate_history:
cov_histories = self.temporal_filter(raw_covariates,
temporal_bases, keras_format = True, scale=scale)
assert cov_histories.shape == (num_examples,temporal_bases,n_features)
covariates[:,1:,sh:] = cov_histories
# note: the first `first_n` bins lack full history; this slice currently keeps all samples (no-op)
covariates = covariates[:,:,:]
elif bins_before:
# This part adapted from <NAME>'s code
spikes = raw_spikes[:]
covariates = np.zeros((num_examples, 1+bins_before, sh+ch*n_features))
covariates[:,0,sh:] = raw_covariates # input current covariates
#Loop through each time bin, and collect the spikes occurring in surrounding time bins
#Note that the first "bins_before" rows of X will remain filled with zeros, since they don't get filled in below.
#This is because, for example, we cannot collect 10 time bins of spikes before time bin 8
for start_idx in range(num_examples-bins_before): #The first bins_before
#The bins of neural data we will be including are between start_idx
#and end_idx (which will have length "bins_before")
end_idx=start_idx+bins_before;
if spike_history:
#Put neural data from surrounding bins in X, starting at row "bins_before"
covariates[start_idx+bins_before,1:,0]=raw_spikes[start_idx:end_idx]
if covariate_history:
#Put neural data from surrounding bins in X, starting at row "bins_before"
covariates[start_idx+bins_before,1:,sh:]=raw_covariates[start_idx:end_idx,:]
# note: the first `bins_before` bins lack full history; this slice currently keeps all samples (no-op)
covariates = covariates[:,:,:]
else:
covariates, spikes = raw_covariates, raw_spikes
return covariates, spikes
def fit_cv(self, X, Y, n_cv=10, verbose=1, continuous_folds = False):
"""Performs cross-validated fitting.
Input
=====
X = input data
Y = spiking data
n_cv = number of cross-validations folds
continuous_folds = True/False. whether to split folds randomly or to
split them in contiguous chunks. The latter is advantageous
when using spike history as a covariate to prevent
leakage across folds
Returns
(Y_hat, pR2_cv); a vector of predictions Y_hat with the
same dimensions as Y, and a list of pR2 scores on each fold pR2_cv.
"""
if not continuous_folds:
if self.spike_history:
try:
assert self.n_every >= int(self.max_time/self.window)
except AssertionError:
print('Warning: Using random CV folds when spike history is used ' + \
'will cause data leakage unless we predict spikes at an ' + \
'interval greater than the length of history used to predict.\n'+\
'Set continuous_folds = True '+ \
'or increase n_every above max_time/window' )
if self.tunemodel=='lstm':
assert continuous_folds
if self.is_ensemble:
print('Running nested CV scheme on first order models.')
return self.ensemble_cv(X, Y, continuous_folds = continuous_folds,
n_cv_outer=n_cv, n_cv_inner=n_cv)
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
n_samples = X.shape[0]
# get history terms
if self.tunemodel == 'lstm':
X, Y = self.get_all_with_history_keras(X, Y)
else:
X, Y = self.get_all_with_history(X, Y)
Y_hat=np.zeros(len(Y))
pR2_cv = list()
if continuous_folds:
# sporadic prediction with continuous_folds not yet implemented
assert self.n_every==1
for i in range(n_cv):
if verbose > 1:
print('...running cv-fold', i, 'of', n_cv)
test_start = int(n_samples*i/n_cv)
test_end = int(n_samples*(i+1)/n_cv)
train_indices = list(range(n_samples)[:test_start])\
+ list(range(n_samples)[test_end:])
Xr = X[train_indices, :]
Yr = Y[train_indices]
Xt = X[test_start:test_end, :]
Yt = Y[test_start:test_end]
self.fit(Xr, Yr, get_history_terms = False)
Yt_hat = self.predict(Xt, get_history_terms = False)
Yt_hat = np.squeeze(Yt_hat)
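# A minimal usage sketch for the MLencoding class above (illustrative; the
# synthetic data and hyperparameters below are placeholders): fit a
# random-forest encoding model on Poisson-distributed spike counts and score
# it with 10-fold cross-validation via fit_cv, as described in its docstring.
#
# X = np.random.randn(2000, 3)                # 2000 time bins, 3 covariates
# Y = np.random.poisson(np.exp(0.5*X[:, 0]))  # synthetic spike counts
# enc = MLencoding(tunemodel='random_forest')
# Y_hat, pR2_cv = enc.fit_cv(X, Y, n_cv=10, verbose=0)
# print('mean pseudo-R2:', np.mean(pR2_cv))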
"""
Test script for utils.py functions.
"""
import os
import numpy as np
import pytest
from astropy import units as u
from cwinpy.utils import (
ellipticity_to_q22,
gcd_array,
get_psr_name,
initialise_ephemeris,
int_to_alpha,
is_par_file,
logfactorial,
q22_to_ellipticity,
)
from lalpulsar.PulsarParametersWrapper import PulsarParametersPy
def test_logfactorial():
"""
Test log factorial function
"""
a = 3
assert logfactorial(a) == np.log(3 * 2 * 1)
a = 3.0
assert logfactorial(a) == np.log(3 * 2 * 1)
def test_gcd_array():
"""
Test greatest common divisor function.
"""
a = 1 # non-list value
with pytest.raises(TypeError):
gcd_array(a)
a = [1] # single value
with pytest.raises(ValueError):
gcd_array(a)
a = [5, 25, 90]
assert gcd_array(a) == 5
def test_int_to_alpha():
"""
Test integer to alphabetical string conversion.
"""
pos = 2.3
with pytest.raises(TypeError):
int_to_alpha(pos)
pos = -1
with pytest.raises(ValueError):
int_to_alpha(pos)
assert int_to_alpha(1) == "A"
assert int_to_alpha(1, case="lower") == "a"
assert int_to_alpha(26) == "Z"
assert int_to_alpha(26, case="lower") == "z"
assert int_to_alpha(27) == "AA"
assert int_to_alpha(28) == "AB"
assert int_to_alpha(200) == "GR"
assert int_to_alpha(1000) == "ALL"
def test_is_par_file():
"""
Test failure of is_par_file.
"""
assert is_par_file("blah_blah_blah") is False
# test par files that don't contain required attributes
brokenpar = "broken.par"
values = {
"F": [100.0],
"RAJ": 0.1,
"DECJ": -0.1,
"PSRJ": "J0101-0101",
}
for leavekey in list(values.keys()):
keys = list(values.keys())
psr = PulsarParametersPy()
for key in keys:
if key != leavekey:
psr[key] = values[key]
psr.pp_to_par(brokenpar)
assert is_par_file(brokenpar) is False
os.remove(brokenpar)
def test_get_psr_name():
"""
Test extraction of pulsar name.
"""
for item, name in zip(
["PSRJ", "PSRB", "PSR", "NAME"],
["J0123+1234", "B0124+12", "J0123+1234", "B0124+12"],
):
psr = PulsarParametersPy()
psr[item] = name
assert get_psr_name(psr) == name
def test_ellipticity_to_q22():
"""
Test ellipticity conversion to mass quadrupole.
"""
epsilon = [1e-9, 1e-8]
expected_q22 = np.array([1e29, 1e30]) * np.sqrt(15.0 / (8.0 * np.pi))
q22 = ellipticity_to_q22(epsilon[0])
assert np.isclose(q22, expected_q22[0])
# test units
q22units = ellipticity_to_q22(epsilon[0], units=True)
assert np.isclose(q22units.value, expected_q22[0])
assert q22units.unit == u.Unit("kg m2")
# test array like
q22 = ellipticity_to_q22(epsilon)
assert len(q22) == len(epsilon)
assert np.allclose(q22, expected_q22)
def test_q22_to_ellipticity_to_q22():
"""
Test mass quadrupole conversion to ellipticity.
"""
q22 = [1e29, 1e30]
expected_epsilon = np.array([1e-9, 1e-8])
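# Context for the expected values in these two tests (a derivation, assuming
# cwinpy's documented convention): Q22 = eps * Izz * sqrt(15 / (8*pi)) with the
# canonical moment of inertia Izz = 1e38 kg m^2, so eps = [1e-9, 1e-8] maps to
# [1e29, 1e30] * sqrt(15 / (8*pi)); q22_to_ellipticity is the inverse map.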
#coding:utf-8
'''
An encoder that converts ground truth annotations into SSD-compatible training targets.
Copyright (C) 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
from bounding_box_utils.bounding_box_utils import iou, convert_coordinates
from ssd_encoder_decoder.matching_utils import match_bipartite_greedy, match_multi
class SSDInputEncoder:
# Transforms ground truth labels for object detection in images (2D bounding box coordinates and class labels) into the format required for training an SSD model
def __init__(self,
img_height,
img_width,
n_classes,
predictor_sizes,
min_scale=0.1,
max_scale=0.9,
scales=None,
aspect_ratios_global=[0.5, 1.0, 2.0],
aspect_ratios_per_layer=None,
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
border_pixels='half',
coords='centroids',
normalize_coords=True,
background_id=0):
'''
Arguments:
img_height (int): Height of the input images.
img_width (int): Width of the input images.
n_classes (int): Number of classes in the dataset (excluding the background class), e.g. 20 for Pascal VOC, 80 for MS COCO.
predictor_sizes (list): The output heights and widths of the convolutional predictor layers.
min_scale (float, optional): Must be > 0.
max_scale (float, optional): Scaling factors can be obtained by linear interpolation between the minimum and the maximum; must be >= min_scale.
scales (list, optional): The scaling factors for each predictor layer.
aspect_ratios_global (list, optional): Global aspect ratios.
aspect_ratios_per_layer (list, optional): Per-layer aspect ratios.
two_boxes_for_ar1 (bool, optional): Whether the box aspect ratios include 1.
steps (list, optional): Downscaling factors from the input to each predictor layer.
offsets (list, optional): Half the width and height of each grid cell.
clip_boxes (bool, optional): If `True`, clip boxes that extend beyond the image boundaries.
variances (list, optional): The coordinate offsets are divided by these values.
matching_type (str, optional): Either `multi` or `bipartite`.
'bipartite': each ground truth box is matched to the single default box with the highest IoU.
'multi': in addition to the bipartite matching above, every default box with an IoU greater than or
equal to `pos_iou_threshold` is matched to the corresponding ground truth box.
pos_iou_threshold (float, optional): IoU threshold for matching in 'multi' mode; boxes above it are labeled positive.
neg_iou_limit (float, optional): Boxes below this value are labeled negative;
default boxes that are neither positive nor negative do not take part in training.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'.
If 'include', border pixels belong to the boxes. If 'exclude', border pixels do not belong to the boxes.
If 'half', one of the horizontal and vertical borders belongs to the boxes and the other does not.
coords (str, optional): The coordinate format used internally by the model (not the input format of the ground truth labels).
'centroids': (cx, cy, w, h) (box center coordinates, width, height),
'minmax': (xmin, xmax, ymin, ymax)
'corners': (xmin, ymin, xmax, ymax)
normalize_coords (bool, optional): If `True`, use relative instead of absolute coordinates,
normalizing coordinates to [0,1].
background_id (int, optional): Determines the class ID of the background class.
'''
predictor_sizes = np.array(predictor_sizes)
if predictor_sizes.ndim == 1:
predictor_sizes = np.expand_dims(predictor_sizes, axis=0)
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if (len(scales) != predictor_sizes.shape[0] + 1):
raise ValueError("It must be either scales is None or len(scales) == len(predictor_sizes)+1, but len(scales) == {} and len(predictor_sizes)+1 == {}".format(len(scales), len(predictor_sizes)+1))
scales = np.array(scales)
if np.any(scales <= 0):
raise ValueError("All values in `scales` must be greater than 0, but the passed list of scales is {}".format(scales))
else:
if not 0 < min_scale <= max_scale:
raise ValueError("It must be 0 < min_scale <= max_scale, but it is min_scale = {} and max_scale = {}".format(min_scale, max_scale))
if not (aspect_ratios_per_layer is None):
if (len(aspect_ratios_per_layer) != predictor_sizes.shape[0]):
raise ValueError("It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == len(predictor_sizes), but len(aspect_ratios_per_layer) == {} and len(predictor_sizes) == {}".format(len(aspect_ratios_per_layer), len(predictor_sizes)))
for aspect_ratios in aspect_ratios_per_layer:
if np.any(np.array(aspect_ratios) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
else:
if (aspect_ratios_global is None):
raise ValueError("At least one of `aspect_ratios_global` and `aspect_ratios_per_layer` must not be `None`.")
if np.any(np.array(aspect_ratios_global) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
if not (coords == 'minmax' or coords == 'centroids' or coords == 'corners'):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
if (not (steps is None)) and (len(steps) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one offset value per predictor layer.")
self.img_height = img_height
self.img_width = img_width
self.n_classes = n_classes + 1 # +1 for the background class
self.predictor_sizes = predictor_sizes
self.min_scale = min_scale
self.max_scale = max_scale
# If `scales` is None, compute linear interpolations between `min_scale` and `max_scale` as the scaling factors
if (scales is None):
self.scales = np.linspace(self.min_scale, self.max_scale, len(self.predictor_sizes)+1) # np.linspace creates evenly spaced values
else:
# If scales are given explicitly, use them directly; no need to compute them from `min_scale` and `max_scale`.
self.scales = scales
# If `aspect_ratios_per_layer` is None, use the same `aspect_ratios_global` values for every layer
if (aspect_ratios_per_layer is None):
self.aspect_ratios = [aspect_ratios_global] * predictor_sizes.shape[0]
else:
# If per-layer aspect ratios are given, simply use those.
self.aspect_ratios = aspect_ratios_per_layer
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (steps is None):
self.steps = steps
else:
self.steps = [None] * predictor_sizes.shape[0]
if not (offsets is None):
self.offsets = offsets
else:
self.offsets = [None] * predictor_sizes.shape[0]
self.clip_boxes = clip_boxes
self.variances = variances
self.matching_type = matching_type
self.pos_iou_threshold = pos_iou_threshold
self.neg_iou_limit = neg_iou_limit
self.border_pixels = border_pixels
self.coords = coords
self.normalize_coords = normalize_coords
self.background_id = background_id
# Compute the number of boxes per spatial location of each predictor layer.
# For example, if a predictor layer has three aspect ratios [1.0, 0.5, 2.0] and, for ratio 1.0, predicts two boxes of different sizes,
# the predictor layer predicts four boxes in total at every spatial location of the feature map
if not (aspect_ratios_per_layer is None):
self.n_boxes = []
for aspect_ratios in aspect_ratios_per_layer:
if (1 in aspect_ratios) & two_boxes_for_ar1:
self.n_boxes.append(len(aspect_ratios) + 1)
else:
self.n_boxes.append(len(aspect_ratios))
else:
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios_global) + 1
else:
self.n_boxes = len(aspect_ratios_global)
##################################################################################
# Compute the default boxes of each predictor layer. The list holds one entry per layer, each of shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
##################################################################################
self.boxes_list = []
for i in range(len(self.predictor_sizes)):
boxes= self.generate_anchor_boxes_for_layer(feature_map_size=self.predictor_sizes[i],
aspect_ratios=self.aspect_ratios[i],
this_scale=self.scales[i],
next_scale=self.scales[i+1],
this_steps=self.steps[i],
this_offsets=self.offsets[i])
self.boxes_list.append(boxes)
def __call__(self, ground_truth_labels):
'''
Converts the ground truth data into the format needed for training.
Arguments: ground_truth_labels (list): (class_id, xmin, ymin, xmax, ymax)
Returns: y_encoded, (batch_size, #boxes, #classes + 4 + 4 + 4)
'''
#1. Column order of the ground truth labels
class_id = 0
xmin = 1
ymin = 2
xmax = 3
ymax = 4
batch_size = len(ground_truth_labels)
# Arrange the anchor box template of shape (batch_size, #boxes, #classes + 12)
y_encoded = self.generate_encoding_template(batch_size=batch_size)
# Match ground truth boxes and anchor boxes
y_encoded[:, :, self.background_id] = 1 # All boxes are background by default.
n_boxes = y_encoded.shape[1]
class_vectors = np.eye(self.n_classes) # one-hot class vectors
for i in range(batch_size): # For each batch item...
if ground_truth_labels[i].size == 0: continue # If there is no ground truth for this batch item, there is nothing to match.
labels = ground_truth_labels[i].astype(np.float) # The labels for this batch item
# Check for degenerate ground truth bounding boxes before attempting any computations.
if np.any(labels[:,[xmax]] - labels[:,[xmin]] <= 0) or np.any(labels[:,[ymax]] - labels[:,[ymin]] <= 0):
raise DegenerateBoxError("SSDInputEncoder detected degenerate ground truth bounding boxes for batch item {} with bounding boxes {}, ".format(i, labels) +
"i.e. bounding boxes where xmax <= xmin and/or ymax <= ymin. Degenerate ground truth " +
"bounding boxes will lead to NaN errors during the training.")
# normalize
if self.normalize_coords:
labels[:,[ymin,ymax]] /= self.img_height
labels[:,[xmin,xmax]] /= self.img_width
# Convert the coordinate format if necessary
if self.coords == 'centroids':
labels = convert_coordinates(labels, start_index=xmin, conversion='corners2centroids', border_pixels=self.border_pixels)
elif self.coords == 'minmax':
labels = convert_coordinates(labels, start_index=xmin, conversion='corners2minmax')
classes_one_hot = class_vectors[labels[:, class_id].astype(np.int)] # The one-hot class IDs for the ground truth boxes of this batch item
labels_one_hot = np.concatenate([classes_one_hot, labels[:, [xmin,ymin,xmax,ymax]]], axis=-1) # The one-hot version of the labels for this batch item
# Compute the IoU matrix, shape `(num_ground_truth_boxes, num_anchor_boxes)`.
similarities = iou(labels[:,[xmin,ymin,xmax,ymax]], y_encoded[i,:,-12:-8], coords=self.coords, mode='outer_product', border_pixels=self.border_pixels)
# 1. Find the single default box with the highest IoU for each ground truth box; this guarantees that every ground truth box is matched to at least one default box.
bipartite_matches = match_bipartite_greedy(weight_matrix=similarities)
# Write the ground truth labels into the matched default boxes
y_encoded[i, bipartite_matches, :-8] = labels_one_hot
# Zero out the matched default boxes to mark them as matched
similarities[:, bipartite_matches] = 0
#2. Each remaining default box is matched to the ground truth box with which it has the highest IoU; if that IoU exceeds the threshold pos_iou_threshold, the match succeeds
if self.matching_type == 'multi':
matches = match_multi(weight_matrix=similarities, threshold=self.pos_iou_threshold)
y_encoded[i, matches[1], :-8] = labels_one_hot[matches[0]]
similarities[:, matches[1]] = 0
# Finally: remaining boxes with an IoU above neg_iou_limit are set to neutral; being close to a ground truth box, they are unsuitable as background examples for training
max_background_similarities = np.amax(similarities, axis=0)
neutral_boxes = np.nonzero(max_background_similarities >= self.neg_iou_limit)[0]
y_encoded[i, neutral_boxes, self.background_id] = 0
# 2. Convert the coordinates into offset values
if self.coords == 'centroids':
y_encoded[:,:,[-12,-11]] -= y_encoded[:,:,[-8,-7]] # cx(gt) - cx(anchor), cy(gt) - cy(anchor)
y_encoded[:,:,[-12,-11]] /= y_encoded[:,:,[-6,-5]] * y_encoded[:,:,[-4,-3]] # (cx(gt) - cx(anchor)) / w(anchor) / cx_variance, (cy(gt) - cy(anchor)) / h(anchor) / cy_variance
y_encoded[:,:,[-10,-9]] /= y_encoded[:,:,[-6,-5]] # w(gt) / w(anchor), h(gt) / h(anchor)
y_encoded[:,:,[-10,-9]] = np.log(y_encoded[:,:,[-10,-9]]) / y_encoded[:,:,[-2,-1]] # ln(w(gt) / w(anchor)) / w_variance, ln(h(gt) / h(anchor)) / h_variance (ln == natural logarithm)
elif self.coords == 'corners':
y_encoded[:,:,-12:-8] -= y_encoded[:,:,-8:-4] # (gt - anchor) for all four coordinates
y_encoded[:,:,[-12,-10]] /= np.expand_dims(y_encoded[:,:,-6] - y_encoded[:,:,-8], axis=-1) # (xmin(gt) - xmin(anchor)) / w(anchor), (xmax(gt) - xmax(anchor)) / w(anchor)
y_encoded[:,:,[-11,-9]] /= np.expand_dims(y_encoded[:,:,-5] - y_encoded[:,:,-7], axis=-1) # (ymin(gt) - ymin(anchor)) / h(anchor), (ymax(gt) - ymax(anchor)) / h(anchor)
y_encoded[:,:,-12:-8] /= y_encoded[:,:,-4:] # (gt - anchor) / size(anchor) / variance for all four coordinates, where 'size' refers to w and h respectively
elif self.coords == 'minmax':
y_encoded[:,:,-12:-8] -= y_encoded[:,:,-8:-4] # (gt - anchor) for all four coordinates
y_encoded[:,:,[-12,-11]] /= np.expand_dims(y_encoded[:,:,-7] - y_encoded[:,:,-8], axis=-1) # (xmin(gt) - xmin(anchor)) / w(anchor), (xmax(gt) - xmax(anchor)) / w(anchor)
y_encoded[:,:,[-10,-9]] /= np.expand_dims(y_encoded[:,:,-5] - y_encoded[:,:,-6], axis=-1) # (ymin(gt) - ymin(anchor)) / h(anchor), (ymax(gt) - ymax(anchor)) / h(anchor)
y_encoded[:,:,-12:-8] /= y_encoded[:,:,-4:] # (gt - anchor) / size(anchor) / variance for all four coordinates, where 'size' refers to w and h respectively
return y_encoded
def generate_anchor_boxes_for_layer(self,
feature_map_size,
aspect_ratios,
this_scale,
next_scale,
this_steps=None,
this_offsets=None):
'''
Arguments:
feature_map_size (tuple): [feature_map_height, feature_map_width]
aspect_ratios (list): Aspect ratios of the anchor boxes to be generated
this_scale (float), next_scale (float): A float in [0, 1]
Returns:
(feature_map_height, feature_map_width, n_boxes_per_cell, 4), where the trailing 4 holds the coordinates
'''
size = min(self.img_height, self.img_width)
# Compute box width and height for every aspect ratio
wh_list = []
for ar in aspect_ratios:
if (ar == 1):
box_height = box_width = this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
box_height = box_width = np.sqrt(this_scale * next_scale) * size
wh_list.append((box_width, box_height))
else:
box_width = this_scale * size * np.sqrt(ar)
box_height = this_scale * size / np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
n_boxes = len(wh_list) # number of boxes per grid cell
# Compute the box center positions
if (this_steps is None):
step_height = self.img_height / feature_map_size[0]
step_width = self.img_width / feature_map_size[1]
else:
if isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2):
step_height = this_steps[0]
step_width = this_steps[1]
elif isinstance(this_steps, (int, float)):
step_height = this_steps
step_width = this_steps
# this_offsets: offset of the anchor box centers from the top-left corner, in pixels
if (this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2):
offset_height = this_offsets[0]
offset_width = this_offsets[1]
elif isinstance(this_offsets, (int, float)):
offset_height = this_offsets
offset_width = this_offsets
# Compute the center coordinates of the default boxes
cy = np.linspace(offset_height * step_height, (offset_height + feature_map_size[0] - 1) * step_height, feature_map_size[0])
cx = np.linspace(offset_width * step_width, (offset_width + feature_map_size[1] - 1) * step_width, feature_map_size[1])
cx_grid, cy_grid = np.meshgrid(cx, cy) # generate the grid
cx_grid = np.expand_dims(cx_grid, -1) # so that np.tile() can broadcast below
cy_grid = np.expand_dims(cy_grid, -1)
# (feature_map_height, feature_map_width, n_boxes, 4); the last dimension holds (cx, cy, w, h)
boxes_tensor = np.zeros((feature_map_size[0], feature_map_size[1], n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, n_boxes)) # cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, n_boxes)) # cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # h
# Convert (cx, cy, w, h) into (xmin, ymin, xmax, ymax) format
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2corners')
# Clip boxes that extend beyond the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:,:,:,[0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:,:,:,[0, 2]] = x_coords
y_coords = boxes_tensor[:,:,:,[1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:,:,:,[1, 3]] = y_coords
# Normalize the coordinates to [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
if self.coords == 'centroids':
# (xmin, ymin, xmax, ymax)->(cx, cy, w, h)
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2centroids', border_pixels='half')
elif self.coords == 'minmax':
# (xmin, ymin, xmax, ymax)->(xmin, xmax, ymin, ymax).
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2minmax', border_pixels='half')
return boxes_tensor
def generate_encoding_template(self, batch_size):
'''
All tensor creation, reshape, and concatenation operations in this function, as well as the subroutines it calls, mirror those in the SSD model itself.
Takes the per-layer list of `(feature_map_height, feature_map_width, n_boxes, 4)` arrays to `(batch_size, #boxes, #classes + 12)`.
Arguments:
batch_size (int): The batch size.
Returns:
An array of shape `(batch_size, #boxes, #classes + 12)`, the template for encoding the ground truth labels.
The length of the last axis is `#classes + 12` because the model output holds not only the 4 predicted
coordinate offsets but also the 4 coordinates of the default boxes and the 4 variance values.
'''
#1. anchor boxes
boxes_batch = []
for boxes in self.boxes_list:
# self.boxes_list: list of arrays of shape (feature_map_height, feature_map_width, n_boxes, 4)
# expand each to a 5D tensor `(batch_size, feature_map_height, feature_map_width, n_boxes, 4)`
boxes = np.expand_dims(boxes, axis=0)
boxes = np.tile(boxes, (batch_size, 1, 1, 1, 1))
# 5D tensor -> 3D tensor `(batch, feature_map_height * feature_map_width * n_boxes, 4)`.
boxes = np.reshape(boxes, (batch_size, -1, 4))
boxes_batch.append(boxes)
# (batch, sum_per_predict_layer(feature_map_height * feature_map_width * n_boxes), 4)
boxes_tensor = np.concatenate(boxes_batch, axis=1)
# 2: one-hot class encodings `(batch, #boxes, #classes)`
classes_tensor = np.zeros((batch_size, boxes_tensor.shape[1], self.n_classes))
# 3: variances. Same shape as `boxes_tensor`, simply holding the 4 variance values at each position of the last axis.
variances_tensor = np.zeros_like(boxes_tensor)
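# A minimal usage sketch (illustrative; the sizes, scales, and the single
# ground truth box below are placeholders): encode one image's labels into SSD
# training targets for a hypothetical 300x300 input with two predictor layers.
#
# encoder = SSDInputEncoder(img_height=300, img_width=300, n_classes=20,
#                           predictor_sizes=[(38, 38), (19, 19)],
#                           scales=[0.1, 0.2, 0.37],
#                           aspect_ratios_global=[0.5, 1.0, 2.0])
# labels = [np.array([[12, 30, 40, 200, 220]])]  # (class_id, xmin, ymin, xmax, ymax)
# y_true = encoder(labels)                       # shape: (1, #boxes, 21 + 12)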
import os
import sys
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import BoundaryNorm
import numpy as np
import numpy.ma as ma
from helperFunctions import temperature_array_from_result
from postProcessing import surface_array_from_file
CMAP = cm.viridis
def plot_3d_surface(a, params, title, filepath):
fig = plt.figure()
ax = fig.gca(projection='3d')
COORD_NODE_FIRST = params['COORD_NODE_FIRST']
COORD_NODE_LAST = params['COORD_NODE_LAST']
DIM = params['N_NODES']
x, y = np.meshgrid(np.linspace(COORD_NODE_FIRST[0], COORD_NODE_LAST[0],
DIM[0]),
np.linspace(COORD_NODE_FIRST[1], COORD_NODE_LAST[1],
DIM[1]))
# Label for axis.
ax.set_xlabel('x in m')
ax.set_ylabel('y in m')
# Title.
#fig.suptitle('Surface Temperature in deg C for\n' + title, fontsize=12)
# Plot surface.
surf = ax.plot_surface(x, y, a, cmap=CMAP,
linewidth=0, antialiased=False)
# Customize z axis.
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
ticks = np.linspace(a.min(), a.max(), 11)
cbar = fig.colorbar(surf, ticks=ticks, orientation='vertical', shrink=0.75,
aspect=20)
cbar.set_label('\nTemperature in °C')
# Invert z axis since the relevant part is colder than the other part
# and therefore hard to see.
ax.invert_zaxis()
# Save plot to file.
print('Save figure to {}.'.format(filepath))
plt.savefig(filepath, bbox_inches='tight')
plt.gcf().clear()
plt.close()
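# A minimal usage sketch (illustrative; the grid, coordinates, and temperatures
# below are placeholders): the `params` entries plot_3d_surface actually reads.
#
# params = {'COORD_NODE_FIRST': (0.0, 0.0), 'COORD_NODE_LAST': (0.1, 0.1),
#           'N_NODES': (64, 64)}
# a = 37.0 + np.random.rand(64, 64)  # fake surface temperatures in deg C
# plot_3d_surface(a, params, title='demo', filepath='surface_demo.eps')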
def plot_heatmap(a, params, title, filepath):
COORD_NODE_FIRST = params['COORD_NODE_FIRST']
COORD_NODE_LAST = params['COORD_NODE_LAST']
DIM = params['N_NODES']
TUMOR_CENTER = params['TUMOR_CENTER']
RADIUS = params['PARAMETERS']['DIAMETER']/2
x, y = np.meshgrid(np.linspace(COORD_NODE_FIRST[0], COORD_NODE_LAST[0],
DIM[0]),
np.linspace(COORD_NODE_FIRST[1], COORD_NODE_LAST[1],
DIM[1]))
# Plot heatmap with circle around hole.
fig, ax = plt.subplots()
heatmap = ax.pcolormesh(x, y, a, cmap=CMAP, rasterized=True)
try:
pts = params['HOLE']
ax.plot(pts[:,0], pts[:,1], color='r', linestyle='dashed')
except KeyError:
circle = plt.Circle((TUMOR_CENTER[0], TUMOR_CENTER[1]), RADIUS,
color='r', fill=False, linestyle='dashed')
ax.add_artist(circle)
# Title.
#fig.suptitle('Heatmap in deg C for\n' + title, fontsize=12)
# Label for axis.
ax.set_xlabel('x in m')
ax.set_ylabel('y in m')
# Add a color bar which maps values to colors.
ticks = np.linspace(a.min(), a.max(), 11)
cbar = fig.colorbar(heatmap, ticks=ticks, orientation='vertical',
shrink=0.75, aspect=20)
cbar.set_label('\nTemperature in °C')
# Equal gridsize.
plt.gca().set_aspect('equal', adjustable='box')
# Save plot to file.
print('Save figure to {}.'.format(filepath))
plt.savefig(filepath, bbox_inches='tight')
plt.gcf().clear()
plt.close()
def plot_heatmap_scaled(temp, params, title, filepath):
surface = surface_array_from_file(params['NAME_INITFILE'])
skull = surface[-1,:,:]
if np.count_nonzero(skull == 1) != 0:
temp[np.where(skull == 0)] = float('nan')
temp = ma.masked_where(np.isnan(temp), temp)
else:
print('No open surface specified.')
COORD_NODE_FIRST = params['COORD_NODE_FIRST']
COORD_NODE_LAST = params['COORD_NODE_LAST']
DIM = params['N_NODES']
TUMOR_CENTER = params['TUMOR_CENTER']
RADIUS = params['PARAMETERS']['DIAMETER']/2
x, y = np.meshgrid(np.linspace(COORD_NODE_FIRST[0], COORD_NODE_LAST[0],
DIM[0]),
np.linspace(COORD_NODE_FIRST[1], COORD_NODE_LAST[1],
DIM[1]))
fig, ax = plt.subplots()
heatmap = ax.pcolormesh(x, y, temp, cmap=CMAP, rasterized=True)
#fig.suptitle('Heatmap in deg C for\n' + title, fontsize=12)
ax.set_xlabel('x in m')
ax.set_ylabel('y in m')
ticks = np.linspace(temp.min(), temp.max(), 11)
cbar = fig.colorbar(heatmap, ticks=ticks, orientation='vertical',
shrink=0.75, aspect=20)
cbar.set_label('\nTemperature in °C')
plt.gca().set_aspect('equal', adjustable='box')
print('Save figure to {}.'.format(filepath))
plt.savefig(filepath, bbox_inches='tight')
plt.gcf().clear()
plt.close()
def plot_heatmap_thermo_scaled(temp, params, title, filepath, csv_min, csv_max):
TICKS = np.linspace(csv_min, csv_max, 10)
LEVELS = MaxNLocator(nbins=1000).tick_values(csv_min, csv_max)
NORM = BoundaryNorm(LEVELS, ncolors=CMAP.N, clip=True)
surface = surface_array_from_file(params['NAME_INITFILE'])
skull = surface[-1,:,:]
if np.count_nonzero(skull == 1) != 0:
temp[np.where(skull == 0)] = float('nan')
temp = ma.masked_where(np.isnan(temp), temp)
else:
print('No open surface specified.')
COORD_NODE_FIRST = params['COORD_NODE_FIRST']
COORD_NODE_LAST = params['COORD_NODE_LAST']
DIM = params['N_NODES']
TUMOR_CENTER = params['TUMOR_CENTER']
RADIUS = params['PARAMETERS']['DIAMETER']/2
x, y = np.meshgrid(np.linspace(COORD_NODE_FIRST[0], COORD_NODE_LAST[0],
DIM[0]),
np.linspace(COORD_NODE_FIRST[1], COORD_NODE_LAST[1],
DIM[1]))
fig, ax = plt.subplots()
heatmap = ax.pcolormesh(x, y, temp, cmap=CMAP, norm=NORM, rasterized=True)
ax.set_xlabel('x in m')
ax.set_ylabel('y in m')
cbar = fig.colorbar(heatmap, ticks=TICKS, orientation='vertical',
shrink=0.75, aspect=20)
cbar.set_label('\nTemperature in °C')
plt.gca().set_aspect('equal', adjustable='box')
print('Save figure to {}.'.format(filepath))
plt.savefig(filepath, bbox_inches='tight')
plt.gcf().clear()
plt.close()
def plot_tumor(a, params, title, filepath):
COORD_NODE_FIRST = params['COORD_NODE_FIRST']
COORD_NODE_LAST = params['COORD_NODE_LAST']
DIM = params['N_NODES']
TUMOR_CENTER = params['TUMOR_CENTER']
RADIUS = params['PARAMETERS']['DIAMETER']/2
x, z = np.meshgrid(np.linspace(COORD_NODE_FIRST[0], COORD_NODE_LAST[0],
DIM[0]),
np.linspace(COORD_NODE_FIRST[2], COORD_NODE_LAST[2],
DIM[2]))
# Plot heatmap with circle around tumor.
fig, ax = plt.subplots()
heatmap = ax.pcolormesh(x, z, a, cmap=CMAP, rasterized=True)
if params['USE_MRI_FILE'] == False:
circle = plt.Circle((TUMOR_CENTER[0], TUMOR_CENTER[2]), RADIUS, color='r',
fill=False, linestyle='dashed')
ax.add_artist(circle)
# Title.
#fig.suptitle('Heatmap in deg C for\n' + title, fontsize=12)
# Customize z axis.
ax.set_xlabel('x in m')
ax.set_ylabel('z in m')
# Add a color bar which maps values to colors.
ticks = np.linspace(a.min(), a.max(), 11)
cbar = fig.colorbar(heatmap, ticks=ticks, orientation='vertical',
shrink=0.75, aspect=20)
cbar.set_label('\nTemperature in °C')
# Equal gridsize.
plt.gca().set_aspect('equal', adjustable='box')
# Save plot to file.
print('Save figure to {}.'.format(filepath))
plt.savefig(filepath, bbox_inches='tight')
plt.gcf().clear()
plt.close()
def plot_thermo(case, folder):
filepath = case + '_thermo.eps'
a = np.genfromtxt(os.path.join(folder, 'thermo.csv'), delimiter=',')
a[np.isnan(a)] = 0
rows = np.any(a, axis=1)
cols = np.any(a, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy
import pytest
import scipy
import cirq
import openfermion
import openfermioncirq as ofc
from openfermioncirq.gates.four_qubit_gates import (
state_swap_eigen_component)
def test_state_swap_eigen_component_args():
with pytest.raises(TypeError):
state_swap_eigen_component(0, '12', 1)
with pytest.raises(ValueError):
state_swap_eigen_component('01', '01', 1)
with pytest.raises(ValueError):
state_swap_eigen_component('01', '10', 0)
with pytest.raises(ValueError):
state_swap_eigen_component('01', '100', 1)
with pytest.raises(ValueError):
state_swap_eigen_component('01', 'ab', 1)
@pytest.mark.parametrize('index_pair,n_qubits', [
((0, 1), 2),
((0, 3), 2),
])
def test_state_swap_eigen_component(index_pair, n_qubits):
state_pair = tuple(format(i, '0' + str(n_qubits) + 'b') for i in index_pair)
i, j = index_pair
dim = 2 ** n_qubits
for sign in (-1, 1):
actual_component = state_swap_eigen_component(
state_pair[0], state_pair[1], sign)
expected_component = numpy.zeros((dim, dim))
expected_component[i, i] = expected_component[j, j] = 0.5
expected_component[i, j] = expected_component[j, i] = sign * 0.5
assert numpy.allclose(actual_component, expected_component)
def test_double_excitation_init_with_multiple_args_fails():
with pytest.raises(ValueError):
_ = ofc.DoubleExcitationGate(exponent=1.0, duration=numpy.pi/2)
def test_double_excitation_eq():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
ofc.DoubleExcitationGate(exponent=1.5),
ofc.DoubleExcitationGate(exponent=-0.5),
ofc.DoubleExcitationGate(rads=-0.5 * numpy.pi),
ofc.DoubleExcitationGate(degs=-90),
ofc.DoubleExcitationGate(duration=-0.5 * numpy.pi / 2))
eq.add_equality_group(
ofc.DoubleExcitationGate(exponent=0.5),
ofc.DoubleExcitationGate(exponent=-1.5),
ofc.DoubleExcitationGate(rads=0.5 * numpy.pi),
ofc.DoubleExcitationGate(degs=90),
ofc.DoubleExcitationGate(duration=-1.5 * numpy.pi / 2))
eq.make_equality_group(lambda: ofc.DoubleExcitationGate(exponent=0.0))
eq.make_equality_group(lambda: ofc.DoubleExcitationGate(exponent=0.75))
def test_double_excitation_consistency():
ofc.testing.assert_implements_consistent_protocols(
ofc.DoubleExcitation)
def test_combined_double_excitation_consistency():
ofc.testing.assert_implements_consistent_protocols(
ofc.CombinedDoubleExcitationGate())
@pytest.mark.parametrize('weights', numpy.random.rand(10, 3))
def test_weights_and_exponent(weights):
exponents = numpy.linspace(-1, 1, 8)
gates = tuple(
ofc.CombinedDoubleExcitationGate(weights / exponent,
exponent=exponent)
for exponent in exponents)
for g1 in gates:
for g2 in gates:
assert cirq.approx_eq(g1, g2, atol=1e-100)
for i, (gate, exponent) in enumerate(zip(gates, exponents)):
assert gate.exponent == 1
new_exponent = exponents[-i]
new_gate = gate._with_exponent(new_exponent)
assert new_gate.exponent == new_exponent
double_excitation_simulator_test_cases = [
(ofc.DoubleExcitation, 1.0,
numpy.array([1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, 1, 1, -1, 1, 1, 1, 1,
1, 1, 1, 1, -1, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, -1.0,
numpy.array([1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, 1, 1, -1, 1, 1, 1, 1,
1, 1, 1, 1, -1, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 0.5,
numpy.array([1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0]) / numpy.sqrt(8),
numpy.array([1, 1, 1, 0, 1, 1, 1, 1,
0, 0, 0, 0, 1j, 0, 0, 0]) / numpy.sqrt(8),
5e-6),
(ofc.DoubleExcitation, -0.5,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -1j, -1, -1, 1, 1,
1, 1, 1, 1, 1j, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, -1. / 7,
numpy.array([1, 1j, -1j, -1, 1, 1j, -1j, -1,
1, 1j, -1j, -1, 1, 1j, -1j, -1]) / 4.,
numpy.array([1, 1j, -1j,
-numpy.cos(numpy.pi / 7) - 1j * numpy.sin(numpy.pi / 7),
1, 1j, -1j, -1, 1, 1j, -1j, -1,
numpy.cos(numpy.pi / 7) + 1j * numpy.sin(numpy.pi / 7),
1j, -1j, -1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 7. / 3,
numpy.array([0, 0, 0, 2,
(1 + 1j) / numpy.sqrt(2), (1 - 1j) / numpy.sqrt(2),
-(1 + 1j) / numpy.sqrt(2), -1,
1, 1j, -1j, -1,
1, 1j, -1j, -1]) / 4.,
numpy.array([0, 0, 0, 1 + 1j * numpy.sqrt(3) / 2,
(1 + 1j) / numpy.sqrt(2), (1 - 1j) / numpy.sqrt(2),
-(1 + 1j) / numpy.sqrt(2), -1,
1, 1j, -1j, -1,
0.5 + 1j * numpy.sqrt(3), 1j, -1j, -1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 0,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 0.25,
numpy.array([1, 0, 0, -2, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 1]) / numpy.sqrt(15),
numpy.array([1, 0, 0, +3j / numpy.sqrt(2) - numpy.sqrt(2),
0, 0, 0, 0,
0, 0, 0, 0,
3 / numpy.sqrt(2) - 1j * numpy.sqrt(2), 0, 0, 1]) /
numpy.sqrt(15),
5e-6)
]
combined_double_excitation_simulator_test_cases = [
(ofc.CombinedDoubleExcitationGate((0, 0, 0)), 1.,
numpy.ones(16) / 4.,
numpy.ones(16) / 4.,
5e-6),
(ofc.CombinedDoubleExcitationGate((0.2, -0.1, 0.7)), 0.,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
5e-6),
(ofc.CombinedDoubleExcitationGate((0.2, -0.1, 0.7)), 0.3,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -numpy.exp(-numpy.pi * 0.105j),
-1, -numpy.exp(-numpy.pi * 0.585j),
numpy.exp(numpy.pi * 0.03j), 1,
1, numpy.exp(numpy.pi * 0.03j),
numpy.exp(-numpy.pi * 0.585j), 1,
numpy.exp(-numpy.pi * 0.105j), 1, 1, 1]) / 4.,
5e-6),
(ofc.CombinedDoubleExcitationGate((1. / 3, 0, 0)), 1.,
numpy.array([0, 0, 0, 0, 0, 0, 1., 0,
0, 1., 0, 0, 0, 0, 0, 0]) / numpy.sqrt(2),
numpy.array([0, 0, 0, 0, 0, 0, 1., 0,
0, 1., 0, 0, 0, 0, 0, 0]) / numpy.sqrt(2),
5e-6),
(ofc.CombinedDoubleExcitationGate((0, -2. / 3, 0)), 1.,
numpy.array([1., 1., 0, 0, 0, 1., 0, 0,
0, 0., -1., 0, 0, 0, 0, 0]) / 2.,
numpy.array([1., 1., 0, 0, 0, -numpy.exp(4j * numpy.pi / 3), 0, 0,
0, 0., -numpy.exp(1j * numpy.pi / 3), 0, 0, 0, 0, 0]
) / 2.,
5e-6),
(ofc.CombinedDoubleExcitationGate((0, 0, 1)), 1.,
numpy.array([0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1., 0, 0, 0]),
numpy.array([0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]),
5e-6),
(ofc.CombinedDoubleExcitationGate((0, 0, 0.5)), 1.,
numpy.array([0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]),
numpy.array([0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1j, 0, 0, 0]) / numpy.sqrt(2),
5e-6),
(ofc.CombinedDoubleExcitationGate((0.5, -1./3, 1.)), 1.,
numpy.array([0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 1, 0, 1, 0, 0, 0]) / numpy.sqrt(3),
numpy.array([0, 0, 0, 1j, 0, -1j / 2., 1 / numpy.sqrt(2),
#! /usr/bin/env python
"""
Aegean Residual (AeRes) has the following capabilities:
- convert a catalogue into an image model
- subtract image model from image
- write model and residual files
"""
__author__ = "<NAME>"
import logging
import numpy as np
from astropy.io import fits
from AegeanTools import catalogs, fitting, wcs_helpers
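# Conversion factor from Gaussian FWHM to standard deviation:
# sigma = FWHM * FWHM2CC, since FWHM = 2*sqrt(2*ln(2)) * sigma.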
FWHM2CC = 1 / (2 * np.sqrt(2 * np.log(2)))
def load_sources(filename,
ra_col='ra', dec_col='dec',
peak_col='peak_flux',
a_col='a', b_col='b', pa_col='pa'):
"""
Open a file, read contents, return a list of all the sources in that file.
Parameters
----------
filename : str
Filename to be read
ra_col, dec_col, peak_col, a_col, b_col, pa_col : str
The column names for each of the parameters.
Default = ['ra', 'dec', 'peak_flux', 'a', 'b', 'pa']
Returns
-------
catalog : [`class:AegeanTools.models.ComponentSource`, ...]
A list of source components
"""
table = catalogs.load_table(filename)
required_cols = [ra_col, dec_col, peak_col, a_col, b_col, pa_col]
good = True
for c in required_cols:
if c not in table.colnames:
logging.error("Column {0} not found".format(c))
good = False
if not good:
logging.error("Some required columns missing or mis-labeled")
return None
# rename the table columns
for old, new in zip([ra_col, dec_col, peak_col, a_col, b_col, pa_col],
['ra', 'dec', 'peak_flux', 'a', 'b', 'pa']):
table.rename_column(old, new)
catalog = catalogs.table_to_source_list(table)
logging.info("read {0} sources from {1}".format(len(catalog), filename))
return catalog
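# Minimal usage sketch (the file name and column labels below are hypothetical;
# only the column-renaming behaviour is illustrated):
#
# sources = load_sources('catalogue.fits', ra_col='RA', dec_col='DEC',
# peak_col='S_peak', a_col='A', b_col='B', pa_col='PA')
# if sources is None:
# raise ValueError("catalogue is missing required columns")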
def make_model(sources, shape, wcshelper, mask=False, frac=None, sigma=4):
"""
Create a model image based on a catalogue of sources.
Parameters
----------
sources : [`class:AegeanTools.models.ComponentSource`, ...]
a list of sources
shape : [float, float]
the shape of the input (and output) image
wcshelper : 'class:AegeanTools.wcs_helpers.WCSHelper'
A WCSHelper object corresponding to the input image
mask : bool
If true then mask pixels instead of subtracting or adding sources
frac : float
pixels that are brighter than frac*peak_flux for each source will be masked if mask=True
sigma: float
pixels that are brighter than rms*sigma will be masked if mask=True
Returns
-------
model : np.ndarray
The desired model.
"""
# Model array
m = np.zeros(shape, dtype=np.float32)
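# Each source is evaluated only over a bounding box spanning `factor` times its
# projected extent along each axis (computed below), not over the whole image.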
factor = 5
i_count = 0
for src in sources:
xo, yo, sx, sy, theta = wcshelper.sky2pix_ellipse([src.ra, src.dec], src.a/3600, src.b/3600, src.pa)
phi = np.radians(theta)
# skip sources that have a center that is outside of the image
if not 0 < xo < shape[0]:
logging.debug("source {0} is not within image".format(src.island))
continue
if not 0 < yo < shape[1]:
logging.debug("source {0} is not within image".format(src.island))
continue
# pixels over which this model is calculated
xoff = factor*(abs(sx*np.cos(phi)) + abs(sy*np.sin(phi)))
xmin = xo - xoff
xmax = xo + xoff
yoff = factor*(abs(sx*np.sin(phi)) + abs(sy*np.cos(phi)))
ymin = yo - yoff
ymax = yo + yoff
# clip to the image size
ymin = max(np.floor(ymin), 0)
ymax = min(np.ceil(ymax), shape[1])
xmin = max(np.floor(xmin), 0)
xmax = min(np.ceil(xmax), shape[0])
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from <EMAIL>. #
##############################################################################
import unittest
import sys
import json
import numpy as np
sys.path.append('../..')
from hsds.util.dsetUtil import getHyperslabSelection
from hsds.util.chunkUtil import guessChunk, getNumChunks, getChunkIds, getChunkId, getPartitionKey, getChunkPartition
from hsds.util.chunkUtil import getChunkIndex, getChunkSelection, getChunkCoverage, getDataCoverage, ChunkIterator
from hsds.util.chunkUtil import getChunkSize, shrinkChunk, expandChunk, getDatasetId, getContiguousLayout, _getEvalStr
from hsds.util.chunkUtil import chunkReadSelection, chunkWriteSelection, chunkReadPoints, chunkWritePoints, chunkQuery
class ChunkUtilTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ChunkUtilTest, self).__init__(*args, **kwargs)
# main
def testGuessChunk(self):
typesize = 'H5T_VARIABLE'
shape = {"class": 'H5S_NULL' }
layout = guessChunk(shape, typesize)
self.assertTrue(layout is None)
shape = {"class": 'H5S_SCALAR' }
layout = guessChunk(shape, typesize)
self.assertEqual(layout, (1,))
shape = {"class": 'H5S_SIMPLE', "dims": [100, 100]}
layout = guessChunk(shape, typesize)
self.assertTrue(len(layout), 2)
for i in range(2):
self.assertTrue(layout[i] >= 1)
self.assertTrue(layout[i] <= 100)
typesize = 8
layout = guessChunk(shape, typesize)
self.assertTrue(len(layout), 2)
for i in range(2):
self.assertTrue(layout[i] >= 1)
self.assertTrue(layout[i] <= 100)
shape = {"class": 'H5S_SIMPLE', "dims": [5]}
layout = guessChunk(shape, typesize)
self.assertEqual(layout, (5,))
shape = {"class": 'H5S_SIMPLE', "dims": [100, 100, 100]}
layout = guessChunk(shape, typesize)
self.assertTrue(len(layout), 3)
for i in range(3):
self.assertTrue(layout[i] >= 1)
self.assertTrue(layout[i] <= 100)
shape = {"class": 'H5S_SIMPLE', "dims": [100, 0], "maxdims": [100, 0]}
layout = guessChunk(shape, typesize)
self.assertTrue(len(layout), 2)
for i in range(2):
self.assertTrue(layout[i] >= 1)
self.assertTrue(layout[i] <= 1024)
shape = {"class": 'H5S_SCALAR'}
layout = guessChunk(shape, typesize)
self.assertEqual(layout, (1,))
shape = {"class": 'H5S_NULL'}
layout = guessChunk(shape, typesize)
self.assertEqual(layout, None)
def testShrinkChunk(self):
CHUNK_MIN = 500
CHUNK_MAX = 5000
typesize = 1
layout = (1, 2, 3)
shrunk = shrinkChunk(layout, typesize, chunk_max=CHUNK_MAX)
self.assertEqual(shrunk, layout)
layout = (100, 200, 300)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes > CHUNK_MAX)
shrunk = shrinkChunk(layout, typesize, chunk_max=CHUNK_MAX)
rank = len(layout)
for i in range(rank):
self.assertTrue(shrunk[i] >= 1)
self.assertTrue(shrunk[i] <= 1000*(i+1))
num_bytes = getChunkSize(shrunk, typesize)
self.assertTrue(num_bytes > CHUNK_MIN)
self.assertTrue(num_bytes < CHUNK_MAX)
layout = (300, 200, 100)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes > CHUNK_MAX)
shrunk = shrinkChunk(layout, typesize, chunk_max=CHUNK_MAX)
rank = len(layout)
for i in range(rank):
self.assertTrue(shrunk[i] >= 1)
self.assertTrue(shrunk[i] <= 1000*(3-i))
num_bytes = getChunkSize(shrunk, typesize)
self.assertTrue(num_bytes > CHUNK_MIN)
self.assertTrue(num_bytes < CHUNK_MAX)
CHUNK_MIN=1*1024*1024
CHUNK_MAX=4*1024*1024
typesize=4
layout = (117, 201, 189, 1)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes > CHUNK_MAX)
shrunk = shrinkChunk(layout, typesize, chunk_max=CHUNK_MAX)
self.assertEqual(shrunk, (59,101,95,1))
num_bytes = getChunkSize(shrunk, typesize)
self.assertTrue(num_bytes > CHUNK_MIN)
self.assertTrue(num_bytes < CHUNK_MAX)
def testExpandChunk(self):
CHUNK_MIN = 5000
CHUNK_MAX = 50000
typesize = 1
shape = {"class": 'H5S_SIMPLE', "dims": [10, 10, 10]}
layout = (10, 10, 10)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes < CHUNK_MIN)
expanded = expandChunk(layout, typesize, shape, chunk_min=CHUNK_MIN)
num_bytes = getChunkSize(expanded, typesize)
# chunk layout can't be larger than dataspace
self.assertTrue(num_bytes < CHUNK_MIN)
self.assertEqual(expanded, (10, 10, 10))
shape = {"class": 'H5S_SIMPLE', "dims": [1000, 2000, 3000]}
layout = (10, 10, 10)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes < CHUNK_MIN)
expanded = expandChunk(layout, typesize, shape, chunk_min=CHUNK_MIN)
num_bytes = getChunkSize(expanded, typesize)
self.assertTrue(num_bytes > CHUNK_MIN)
self.assertTrue(num_bytes < CHUNK_MAX)
shape = {"class": 'H5S_SIMPLE', "dims": [1000, 10, 1000], "maxdims": [1000, 100, 1000]}
layout = (10, 10, 10)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes < CHUNK_MIN)
expanded = expandChunk(layout, typesize, shape, chunk_min=CHUNK_MIN)
num_bytes = getChunkSize(expanded, typesize)
self.assertTrue(num_bytes > CHUNK_MIN)
self.assertTrue(num_bytes < CHUNK_MAX)
shape = {"class": 'H5S_SIMPLE', "dims": [1000, 0, 1000], "maxdims": [1000, 100, 1000]}
layout = (10, 10, 10)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes < CHUNK_MIN)
expanded = expandChunk(layout, typesize, shape, chunk_min=CHUNK_MIN)
num_bytes = getChunkSize(expanded, typesize)
self.assertTrue(num_bytes > CHUNK_MIN)
self.assertTrue(num_bytes < CHUNK_MAX)
shape = {"class": 'H5S_SIMPLE', "dims": [1000, 10, 1000], "maxdims": [1000, 0, 1000]}
layout = (10, 10, 10)
num_bytes = getChunkSize(layout, typesize)
self.assertTrue(num_bytes < CHUNK_MIN)
expanded = expandChunk(layout, typesize, shape, chunk_min=CHUNK_MIN)
num_bytes = getChunkSize(expanded, typesize)
self.assertTrue(num_bytes > CHUNK_MIN)
self.assertTrue(num_bytes < CHUNK_MAX)
def testGetContiguousLayout(self):
typesize = 4
chunk_min=400
chunk_max=800
try:
shape = {"class": 'H5S_SIMPLE', "dims": [100, 100]}
layout = getContiguousLayout(shape, 'H5T_VARIABLE')
self.assertTrue(False)
except ValueError:
pass # expected
shape = {"class": 'H5S_NULL' }
layout = getContiguousLayout(shape, typesize)
self.assertTrue(layout is None)
shape = {"class": 'H5S_SCALAR' }
layout = getContiguousLayout(shape, typesize)
self.assertEqual(layout, (1,))
for extent in (1, 100, 10000):
dims = [extent,]
shape = {"class": 'H5S_SIMPLE', "dims": dims}
layout = getContiguousLayout(shape, typesize, chunk_min=chunk_min, chunk_max=chunk_max)
self.assertTrue(len(layout), 1)
chunk_bytes = layout[0]*typesize
space_bytes = extent*typesize
if space_bytes > chunk_min:
self.assertTrue(chunk_bytes >= chunk_min)
self.assertTrue(chunk_bytes <= chunk_max)
for extent in (1, 10, 100):
dims = [extent, extent]
shape = {"class": 'H5S_SIMPLE', "dims": dims}
layout = getContiguousLayout(shape, typesize, chunk_min=chunk_min, chunk_max=chunk_max)
self.assertTrue(len(layout), 2)
for i in range(2):
self.assertTrue(layout[i] >= 1)
self.assertTrue(layout[i] <= extent)
self.assertEqual(layout[1], extent)
chunk_bytes = layout[0]*layout[1]*typesize
space_bytes = extent*extent*typesize
if space_bytes > chunk_min:
self.assertTrue(chunk_bytes >= chunk_min)
self.assertTrue(chunk_bytes <= chunk_max)
for extent in (1, 10, 100):
dims = [extent, extent, 50]
shape = {"class": 'H5S_SIMPLE', "dims": dims}
layout = getContiguousLayout(shape, typesize, chunk_min=chunk_min, chunk_max=chunk_max)
self.assertTrue(len(layout), 3)
for i in range(3):
self.assertTrue(layout[i] >= 1)
self.assertTrue(layout[i] <= dims[i])
chunk_bytes = layout[0]*layout[1]*layout[2]*typesize
space_bytes = dims[0]*dims[1]*dims[2]*typesize
if space_bytes > chunk_min:
# chunk size may be less than chunk_min in this case
# self.assertTrue(chunk_bytes >= chunk_min)
self.assertEqual(layout[0], 1)
self.assertTrue(chunk_bytes <= chunk_max)
def testGetNumChunks(self):
datashape = [100,]
layout = (10,)
selection = getHyperslabSelection(datashape)
count = getNumChunks(selection, layout)
self.assertEqual(count, 10)
selection = getHyperslabSelection(datashape, 12, 83)
count = getNumChunks(selection, layout)
self.assertEqual(count, 8)
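# e.g. the half-open selection [12, 83) with chunk extent 10 touches chunk
# indexes floor(12/10) = 1 through floor(82/10) = 8, hence 8 chunks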
selection = getHyperslabSelection(datashape, 12, 80)
count = getNumChunks(selection, layout)
self.assertEqual(count, 7)
selection = getHyperslabSelection(datashape, 10, 83)
count = getNumChunks(selection, layout)
self.assertEqual(count, 8)
selection = getHyperslabSelection(datashape, 12, 17)
count = getNumChunks(selection, layout)
self.assertEqual(count, 1)
# try with different increment
selection = getHyperslabSelection(datashape, 0, 10, 5)
count = getNumChunks(selection, layout)
self.assertEqual(count, 1)
selection = getHyperslabSelection(datashape, 0, 11, 5)
count = getNumChunks(selection, layout)
self.assertEqual(count, 2)
selection = getHyperslabSelection(datashape, 6, 11, 5)
count = getNumChunks(selection, layout)
self.assertEqual(count, 1)
selection = getHyperslabSelection(datashape, 12, 83, 2)
count = getNumChunks(selection, layout)
self.assertEqual(count, 8)
selection = getHyperslabSelection(datashape, 12, 83, 20)
count = getNumChunks(selection, layout)
self.assertEqual(count, 4)
selection = getHyperslabSelection(datashape, 10, 83, 20)
count = getNumChunks(selection, layout)
self.assertEqual(count, 4)
datashape = [100,100]
layout = (10,5)
selection = getHyperslabSelection(datashape)
count = getNumChunks(selection, layout)
self.assertEqual(count, 200)
selection = getHyperslabSelection(datashape, (41, 6), (49, 9))
count = getNumChunks(selection, layout)
self.assertEqual(count, 1)
selection = getHyperslabSelection(datashape, (39, 4), (47, 7))
count = getNumChunks(selection, layout)
self.assertEqual(count, 4)
# try with different increment
selection = getHyperslabSelection(datashape, (39, 4), (47, 7), (3, 2))
count = getNumChunks(selection, layout)
self.assertEqual(count, 4)
selection = getHyperslabSelection(datashape, (0, 0), (100, 100), (20, 40))
count = getNumChunks(selection, layout)
self.assertEqual(count, 15)
def testGetChunkIds(self):
# getChunkIds(dset_id, selection, layout, dim=0, prefix=None, chunk_ids=None):
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [1,]
layout = (1,)
selection = getHyperslabSelection(datashape)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 1)
chunk_id = chunk_ids[0]
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_0'))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
self.assertEqual(getDatasetId(chunk_id), dset_id)
datashape = [100,]
layout = (10,)
selection = getHyperslabSelection(datashape)
chunk_ids = getChunkIds(dset_id, selection, layout)
partition_count = 10
self.assertEqual(len(chunk_ids), 10)
for i in range(10):
chunk_id = chunk_ids[i]
# chunk_id should look like:
# c-12345678-1234-1234-1234-1234567890ab_n
# where 'n' is in the range 0-9
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_' + str(i)))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
chunk_id = getPartitionKey(chunk_id, partition_count)
partition = getChunkPartition(chunk_id)
self.assertTrue(partition is not None)
self.assertTrue(partition >= 0)
self.assertTrue(partition < partition_count)
selection = getHyperslabSelection(datashape, 20)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 8)
for i in range(8):
chunk_id = chunk_ids[i]
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_' + str(i+2)))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
selection = getHyperslabSelection(datashape, 20, 81)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 7)
for i in range(7):
chunk_id = chunk_ids[i]
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_' + str(i+2)))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
selection = getHyperslabSelection(datashape, 29, 81)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 7)
for i in range(7):
chunk_id = chunk_ids[i]
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_' + str(i+2)))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
selection = getHyperslabSelection(datashape, 29, 81, 2)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 6)
for i in range(6):
chunk_id = chunk_ids[i]
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_' + str(i+2)))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
selection = getHyperslabSelection(datashape, 29, 81, 20)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 3)
for i in range(3):
chunk_id = chunk_ids[i]
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_' + str(i*2+2)))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
datashape = [3207353,]
layout = (60000,)
selection = getHyperslabSelection(datashape, 1234567, 1234568)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 1)
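# element 1234567 falls in chunk index floor(1234567 / 60000) = 20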
self.assertTrue(chunk_ids[0].endswith("_20") )
datashape = [100,100]
layout = (10,20)
selection = getHyperslabSelection(datashape)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 50)
chunk_ids.reverse() # so we can pop off the front
for i in range(10):
for j in range(5):
chunk_id = chunk_ids.pop()
self.assertTrue(chunk_id.startswith("c-"))
index1 = int(chunk_id[-3])
index2 = int(chunk_id[-1])
self.assertEqual(index1, i)
self.assertEqual(index2, j)
selection = getHyperslabSelection(datashape, (12, 23),(88,80))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 24)
chunk_ids.reverse() # so we can pop off the front
for i in range(8):
for j in range(3):
chunk_id = chunk_ids.pop()
self.assertTrue(chunk_id.startswith("c-"))
index1 = int(chunk_id[-3])
index2 = int(chunk_id[-1])
self.assertEqual(index1, i+1)
self.assertEqual(index2, j+1)
selection = getHyperslabSelection(datashape, (12, 23),(88,80), (6,16))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 24)
chunk_ids.reverse() # so we can pop off the front
for i in range(8):
for j in range(3):
chunk_id = chunk_ids.pop()
self.assertTrue(chunk_id.startswith("c-"))
index1 = int(chunk_id[-3])
index2 = int(chunk_id[-1])
self.assertEqual(index1, i+1)
self.assertEqual(index2, j+1)
selection = getHyperslabSelection(datashape, (12, 23),(88,80), (16,44))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 10)
chunk_ids.reverse() # so we can pop off the front
xindex = (1,2,4,6,7)
yindex = (1,3)
for i in range(5):
for j in range(2):
chunk_id = chunk_ids.pop()
self.assertTrue(chunk_id.startswith("c-"))
index1 = int(chunk_id[-3])
index2 = int(chunk_id[-1])
self.assertEqual(index1, xindex[i])
self.assertEqual(index2, yindex[j])
# 3d test
datashape = [365, 720, 1440]
layout = (2, 180, 720)
selection = getHyperslabSelection(datashape, (0, 0, 0), (1, 720, 1440))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 8)
chunk_ids.reverse() # so we can pop off the front
for i in range(4):
for j in range(2):
chunk_id = chunk_ids.pop()
self.assertTrue(chunk_id.startswith("c-"))
index0 = int(chunk_id[-5])
index1 = int(chunk_id[-3])
index2 = int(chunk_id[-1])
self.assertEqual(index0, 0)
self.assertEqual(index1, i)
self.assertEqual(index2, j)
selection = getHyperslabSelection(datashape, (0, 0, 0), (1, 720, 1440), (1, 25, 25))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 8)
chunk_ids.reverse() # so we can pop off the front
for i in range(4):
for j in range(2):
chunk_id = chunk_ids.pop()
self.assertTrue(chunk_id.startswith("c-"))
index0 = int(chunk_id[-5])
index1 = int(chunk_id[-3])
index2 = int(chunk_id[-1])
self.assertEqual(index0, 0)
self.assertEqual(index1, i)
self.assertEqual(index2, j)
# 2d test - large number of chunks
datashape = [7639, 6307]
layout = (1, 6308)
selection = getHyperslabSelection(datashape, (0, 0), (7639, 6307))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 7639)
chunk_ids.reverse() # so we can pop off the front
for i in range(7639):
chunk_id = chunk_ids.pop()
self.assertTrue(chunk_id.startswith("c-"))
fields = chunk_id.split('_')
self.assertEqual(len(fields), 3)
index1 = int(fields[1])
index2 = int(fields[2])
self.assertEqual(index1, i)
self.assertEqual(index2, 0)
def testGetChunkIndex(self):
chunk_id = "c-12345678-1234-1234-1234-1234567890ab_6_4"
index = getChunkIndex(chunk_id)
self.assertEqual(index, [6,4])
chunk_id = "c-12345678-1234-1234-1234-1234567890ab_64"
index = getChunkIndex(chunk_id)
self.assertEqual(index, [64,])
def testGetChunkSelection(self):
# 1-d test
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [100,]
layout = (10,)
selection = getHyperslabSelection(datashape, 42, 62)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 3)
chunk_id = chunk_ids[0]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 42)
self.assertEqual(sel[0].stop, 50)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[1]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 50)
self.assertEqual(sel[0].stop, 60)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[2]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 60)
self.assertEqual(sel[0].stop, 62)
self.assertEqual(sel[0].step, 1)
# 1-d with step
selection = getHyperslabSelection(datashape, 42, 62, 4)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 2)
chunk_id = chunk_ids[0]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 42)
self.assertEqual(sel[0].stop, 47)
self.assertEqual(sel[0].step, 4)
chunk_id = chunk_ids[1]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 50)
self.assertEqual(sel[0].stop, 59)
self.assertEqual(sel[0].step, 4)
# another 1-d with step
selection = getHyperslabSelection(datashape, 40, 63, 2)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 3)
chunk_id = chunk_ids[0]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 40)
self.assertEqual(sel[0].stop, 49)
self.assertEqual(sel[0].step, 2)
chunk_id = chunk_ids[1]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 50)
self.assertEqual(sel[0].stop, 59)
self.assertEqual(sel[0].step, 2)
chunk_id = chunk_ids[2]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 60)
self.assertEqual(sel[0].stop, 63)
self.assertEqual(sel[0].step, 2)
# test with step > chunk size
selection = getHyperslabSelection(datashape, 0, 100, 15)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 7)
chunk_id = chunk_ids[0]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 1)
self.assertEqual(sel[0].step, 15)
chunk_id = chunk_ids[1]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 15)
self.assertEqual(sel[0].stop, 16)
self.assertEqual(sel[0].step, 15)
chunk_id = chunk_ids[2]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 30)
self.assertEqual(sel[0].stop, 31)
self.assertEqual(sel[0].step, 15)
# 2-d test
datashape = [100,100]
layout = (10,10)
selection = getHyperslabSelection(datashape, (42, 46), (52, 58))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 4)
chunk_id = chunk_ids[0]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 42)
self.assertEqual(sel[0].stop, 50)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 46)
self.assertEqual(sel[1].stop, 50)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[1]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 42)
self.assertEqual(sel[0].stop, 50)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 50)
self.assertEqual(sel[1].stop, 58)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[2]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 50)
self.assertEqual(sel[0].stop, 52)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 46)
self.assertEqual(sel[1].stop, 50)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[3]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 50)
self.assertEqual(sel[0].stop, 52)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 50)
self.assertEqual(sel[1].stop, 58)
self.assertEqual(sel[1].step, 1)
# 1-d test with fractional chunks
datashape = [104,]
layout = (10,)
selection = getHyperslabSelection(datashape, 92, 102)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 2)
chunk_id = chunk_ids[0]
sel = getChunkSelection(chunk_id, selection, layout)
sel = sel[0]
self.assertEqual(sel.start, 92)
self.assertEqual(sel.stop, 100)
self.assertEqual(sel.step, 1)
chunk_id = chunk_ids[1]
sel = getChunkSelection(chunk_id, selection, layout)
sel = sel[0]
self.assertEqual(sel.start, 100)
self.assertEqual(sel.stop, 102)
self.assertEqual(sel.step, 1)
# 3d test
datashape = [365, 720, 1440]
layout = (2, 180, 720)
selection = getHyperslabSelection(datashape, (0, 0, 0), (1, 200, 300))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 2)
chunk_id = chunk_ids[0]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 1)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 180)
self.assertEqual(sel[1].step, 1)
self.assertEqual(sel[2].start, 0)
self.assertEqual(sel[2].stop, 300)
self.assertEqual(sel[2].step, 1)
chunk_id = chunk_ids[1]
sel = getChunkSelection(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 1)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 180)
self.assertEqual(sel[1].stop, 200)
self.assertEqual(sel[1].step, 1)
self.assertEqual(sel[2].start, 0)
self.assertEqual(sel[2].stop, 300)
self.assertEqual(sel[2].step, 1)
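# getChunkCoverage expresses the selection relative to each chunk's own origin,
# i.e. the dataset-space chunk selection shifted down by
# chunk_index * chunk_extent in every dimension.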
def testGetChunkCoverage(self):
# 1-d test
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [100,]
layout = (10,)
selection = getHyperslabSelection(datashape, 42, 62)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 3)
chunk_id = chunk_ids[0]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 10)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[1]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 10)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[2]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 2)
self.assertEqual(sel[0].step, 1)
# 1-d with step
selection = getHyperslabSelection(datashape, 42, 62, 4)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 2)
chunk_id = chunk_ids[0]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 7)
self.assertEqual(sel[0].step, 4)
chunk_id = chunk_ids[1]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 9)
self.assertEqual(sel[0].step, 4)
# 2-d test
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [100,100]
layout = (10,10)
selection = getHyperslabSelection(datashape, (42, 46), (52, 58))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 4)
chunk_id = chunk_ids[0]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 10)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 6)
self.assertEqual(sel[1].stop, 10)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[1]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 10)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 8)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[2]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 2)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 6)
self.assertEqual(sel[1].stop, 10)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[3]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 2)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 8)
self.assertEqual(sel[1].step, 1)
# 2-d test - non-even chunks at boundary
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [45,54]
layout = (10,10)
selection = getHyperslabSelection(datashape, (22, 2), (23, 52))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 6)
chunk_id = chunk_ids[0]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 3)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 2)
self.assertEqual(sel[1].stop, 10)
self.assertEqual(sel[1].step, 1)
# the next 4 chunks all have the same selection
for i in range(1,5):
chunk_id = chunk_ids[i]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 3)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 10)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[5]
sel = getChunkCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 3)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 2)
self.assertEqual(sel[1].step, 1)
# 1-d test with fractional chunks
datashape = [104,]
layout = (10,)
selection = getHyperslabSelection(datashape, 92, 102)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 2)
chunk_id = chunk_ids[0]
sel = getChunkCoverage(chunk_id, selection, layout)
sel = sel[0]
self.assertEqual(sel.start, 2)
self.assertEqual(sel.stop, 10)
self.assertEqual(sel.step, 1)
chunk_id = chunk_ids[1]
sel = getChunkCoverage(chunk_id, selection, layout)
sel = sel[0]
self.assertEqual(sel.start, 0)
self.assertEqual(sel.stop, 2)
self.assertEqual(sel.step, 1)
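# getDataCoverage gives the slice of the selection-shaped output array that a
# chunk contributes to, so consecutive chunks yield contiguous, non-overlapping
# slices that together cover the whole selection.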
def testGetDataCoverage(self):
# 1-d test
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [100,]
layout = (10,)
selection = getHyperslabSelection(datashape, 42, 62)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 3)
chunk_id = chunk_ids[0]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 8)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[1]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 8)
self.assertEqual(sel[0].stop, 18)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[2]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 18)
self.assertEqual(sel[0].stop, 20)
self.assertEqual(sel[0].step, 1)
# test with step
selection = getHyperslabSelection(datashape, 42, 68, 4)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 3)
chunk_id = chunk_ids[0]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 2)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[1]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 2)
self.assertEqual(sel[0].stop, 5)
self.assertEqual(sel[0].step, 1)
chunk_id = chunk_ids[2]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 5)
self.assertEqual(sel[0].stop, 7)
self.assertEqual(sel[0].step, 1)
# 2-d test
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [100,100]
layout = (10,10)
selection = getHyperslabSelection(datashape, (42, 46), (52, 58))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 4)
chunk_id = chunk_ids[0]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 8)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 4)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[1]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 8)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 4)
self.assertEqual(sel[1].stop, 12)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[2]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 8)
self.assertEqual(sel[0].stop, 10)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 4)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[3]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 8)
self.assertEqual(sel[0].stop, 10)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 4)
self.assertEqual(sel[1].stop, 12)
self.assertEqual(sel[1].step, 1)
# 2-d test, non-regular chunks
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
datashape = [45,54]
layout = (10,10)
selection = getHyperslabSelection(datashape, (22, 2), (23, 52))
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 6)
chunk_id = chunk_ids[0]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 1)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 0)
self.assertEqual(sel[1].stop, 8)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[1]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 1)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 8)
self.assertEqual(sel[1].stop, 18)
self.assertEqual(sel[1].step, 1)
chunk_id = chunk_ids[5]
sel = getDataCoverage(chunk_id, selection, layout)
self.assertEqual(sel[0].start, 0)
self.assertEqual(sel[0].stop, 1)
self.assertEqual(sel[0].step, 1)
self.assertEqual(sel[1].start, 48)
self.assertEqual(sel[1].stop, 50)
self.assertEqual(sel[1].step, 1)
# 1-d test with fractional chunks
datashape = [104,]
layout = (10,)
selection = getHyperslabSelection(datashape, 92, 102)
chunk_ids = getChunkIds(dset_id, selection, layout)
self.assertEqual(len(chunk_ids), 2)
chunk_id = chunk_ids[0]
sel = getDataCoverage(chunk_id, selection, layout)
sel = sel[0]
self.assertEqual(sel.start, 0)
self.assertEqual(sel.stop, 8)
self.assertEqual(sel.step, 1)
chunk_id = chunk_ids[1]
sel = getDataCoverage(chunk_id, selection, layout)
sel = sel[0]
self.assertEqual(sel.start, 8)
self.assertEqual(sel.stop, 10)
self.assertEqual(sel.step, 1)
def testGetChunkId(self):
# getChunkId(dset_id, point, layout) - returns the id of the single chunk containing the given point
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
layout = (1,)
chunk_id = getChunkId(dset_id, 0, layout)
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_0'))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
layout = (100,)
chunk_id = getChunkId(dset_id, 2, layout)
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_0'))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
layout = (10,)
chunk_id = getChunkId(dset_id, 23, layout)
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_2'))
self.assertEqual(chunk_id[2:-2], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+2)
layout = (10,20)
chunk_id = getChunkId(dset_id, (23,61), layout)
self.assertTrue(chunk_id.startswith("c-"))
self.assertTrue(chunk_id.endswith('_2_3'))
self.assertEqual(chunk_id[2:-4], dset_id[2:])
self.assertEqual(len(chunk_id), 2+36+4)
def testDimQuery(self):
request = {"dim_0": 23, "dim_1": 54, "dim_2": 2}
dims = []
dim = 0
while True:
k = "dim_{}".format(dim)
if k in request:
extent = int(request[k])
dims.append(extent)
dim += 1
else:
break
self.assertEqual(dims, [23, 54, 2])
def testChunkIterator1d(self):
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
dims = [100]
layout = [10,]
selection = getHyperslabSelection(dims)
it = ChunkIterator(dset_id, selection, layout)
chunk_ids = set(getChunkIds(dset_id, selection, layout))
count = 0
while True:
try:
chunk_id = it.next()
self.assertTrue(chunk_id in chunk_ids)
count += 1
except StopIteration:
break
self.assertEqual(count, 10)
def testChunkIterator2d(self):
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
dims = [100, 100,]
layout = [50,50]
selection = getHyperslabSelection(dims)
it = ChunkIterator(dset_id, selection, layout)
chunk_ids = set(getChunkIds(dset_id, selection, layout))
count = 0
while True:
try:
chunk_id = it.next()
self.assertTrue(chunk_id in chunk_ids)
count += 1
except StopIteration:
break
self.assertEqual(count, 4)
def testChunkIterator3d(self):
dset_id = "d-12345678-1234-1234-1234-1234567890ab"
dims = [100, 100, 20]
layout = [50,50,5]
selection = getHyperslabSelection(dims)
it = ChunkIterator(dset_id, selection, layout)
chunk_ids = set(getChunkIds(dset_id, selection, layout))
count = 0
while True:
try:
chunk_id = it.next()
self.assertTrue(chunk_id in chunk_ids)
count += 1
except StopIteration:
break
self.assertEqual(count, 16)
def testGetEvalStr(self):
queries = { "date == 23": "rows['date'] == 23",
"wind == b'W 5'": "rows['wind'] == b'W 5'",
"temp > 61": "rows['temp'] > 61",
"(date >=22) & (date <= 24)": "(rows['date'] >=22) & (rows['date'] <= 24)",
"(date == 21) & (temp > 70)": "(rows['date'] == 21) & (rows['temp'] > 70)",
"(wind == b'E 7') | (wind == b'S 7')": "(rows['wind'] == b'E 7') | (rows['wind'] == b'S 7')" }
fields = ["date", "wind", "temp"]
for query in queries.keys():
eval_str = _getEvalStr(query, "rows", fields)
self.assertEqual(eval_str, queries[query])
def testBadQuery(self):
queries = ( "foobar", # no variable used
"wind = b'abc", # non-closed literal
"(wind = b'N') & (temp = 32", # missing paren
"foobar > 42", # invalid field name
"import subprocess; subprocess.call(['ls', '/'])") # injection attack
fields = ("date", "wind", "temp" )
for query in queries:
try:
eval_str = _getEvalStr(query, "x", fields)
self.assertTrue(False) # shouldn't get here
except Exception:
pass # ok
def testChunkReadSelection(self):
chunk_arr = np.array([2,3,5,7,11,13,17,19])
arr = chunkReadSelection(chunk_arr, slices=((slice(3,5,1),)))
self.assertEqual(arr.tolist(), [7,11])
arr = chunkReadSelection(chunk_arr, slices=((slice(3,9,2),)))
self.assertEqual(arr.tolist(), [7,13,19])
chunk_arr = np.zeros((3,4))
for i in range(3):
chunk_arr[i] = list(range(i+1,i+1+4))
arr = chunkReadSelection(chunk_arr, slices=((slice(1,2,1),slice(0,4,1))))
self.assertEqual(arr.tolist(), [[2.0, 3.0, 4.0, 5.0]])
arr = chunkReadSelection(chunk_arr, slices=((slice(0,3,1),slice(2,3,1))))
self.assertEqual(arr.tolist(), [[3.0],[4.0],[5.0]])
def testChunkWriteSelection(self):
chunk_arr = np.zeros((8,))
data = np.array([2,3,5,7,11,13,17,19])
chunkWriteSelection(chunk_arr=chunk_arr, slices=(slice(0,8,1),), data=data)
self.assertEqual(chunk_arr.tolist(), data.tolist())
data = np.array([101, 121, 131])
chunkWriteSelection(chunk_arr=chunk_arr, slices=(slice(3,6,1),), data=data)
self.assertEqual(chunk_arr.tolist(), [2,3,5,101,121,131,17,19])
def testChunkReadPoints1D(self):
chunk_id = "c-00de6a9c-6aff5c35-15d5-3864dd-0740f8_12"
chunk_layout = (100,)
chunk_arr = np.array(list(range(100)))
point_arr = np.array([[1200],[1299],[1244],[1222]], dtype=np.uint64)
arr = chunkReadPoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
self.assertEqual(arr.tolist(), [0, 99, 44, 22])
point_arr = np.array([[1200],[1299],[1244],[1322]], dtype=np.uint64)
try:
chunkReadPoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
self.assertTrue(False) # expected exception
except IndexError:
pass # expected
def testChunkReadPoints2D(self):
chunk_id = "c-00de6a9c-6aff5c35-15d5-3864dd-0740f8_3_4"
chunk_layout = (100,100)
chunk_arr = np.zeros((100,100))
chunk_arr[:,12] = 69
chunk_arr[12,:] = 96
point_arr = np.array([[312,498],[312,412],[355,412],[398,497]], dtype=np.uint64)
arr = chunkReadPoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
self.assertEqual(arr.tolist(), [96,96,69,0])
point_arr = np.array([[312,498],[312,412],[355,412],[398,397]], dtype=np.uint64)
try:
chunkReadPoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
self.assertTrue(False) # expected exception
except IndexError:
pass # expected
def testChunkWritePoints1D(self):
chunk_id = "c-00de6a9c-6aff5c35-15d5-3864dd-0740f8_12"
chunk_layout = (100,)
chunk_arr = np.zeros((100,))
rank = 1
# (coord1, coord2, ...) | dset_dtype
point_dt = np.dtype([("coord", np.uint64), ("val", chunk_arr.dtype)])
# point_dt = np.dtype([("coord", np.uint64, (rank,)), ("val", chunk_arr.dtype)])
indexes = (1203,1245,1288,1212,1299)
num_points = len(indexes)
point_arr = np.zeros((num_points,), dtype=point_dt)
for i in range(num_points):
e = point_arr[i]
e[0] = indexes[i]
e[1] = 42
chunkWritePoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
for i in range(100):
if i + 1200 in indexes:
self.assertEqual(chunk_arr[i], 42)
else:
self.assertEqual(chunk_arr[i], 0)
e = point_arr[1]
e[0] = 99 # index out of range
try:
chunkWritePoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
self.assertTrue(False) # expected exception
except IndexError:
pass # expected
def testChunkWritePoints2D(self):
chunk_id = "c-00de6a9c-6aff5c35-15d5-3864dd-0740f8_3_2"
chunk_layout = (10,20)
chunk_arr = np.zeros((10,20))
rank = 2
# (coord1, coord2, ...) | dset_dtype
point_dt = np.dtype([("coord", np.uint64, (2,)), ("val", chunk_arr.dtype)])
indexes =((32,46),(38,52),(35,53))
num_points = len(indexes)
point_arr = np.zeros((num_points,), dtype=point_dt)
for i in range(num_points):
e = point_arr[i]
e[0] = indexes[i]
e[1] = 42
chunkWritePoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
chunk_index = (30,40)
for i in range(num_points):
index = indexes[i]
x = index[0]- chunk_index[0]
y = index[1] - chunk_index[1]
self.assertEqual(chunk_arr[x,y], 42)
e = point_arr[0]
e[0] = (42,46) # index out of range
try:
chunkWritePoints(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, point_arr=point_arr)
self.assertTrue(False) # expected exception
except IndexError:
pass # expected
def testChunkQuery(self):
chunk_id = "c-00de6a9c-6aff5c35-15d5-3864dd-0740f8_12"
chunk_layout = (100,)
value = [
("EBAY", "20170102", 3023, 3088),
("AAPL", "20170102", 3054, 2933),
("AMZN", "20170102", 2973, 3011),
("EBAY", "20170103", 3042, 3128),
("AAPL", "20170103", 3182, 3034),
("AMZN", "20170103", 3021, 2788),
("EBAY", "20170104", 2798, 2876),
("AAPL", "20170104", 2834, 2867),
("AMZN", "20170104", 2891, 2978),
("EBAY", "20170105", 2973, 2962),
("AAPL", "20170105", 2934, 3010),
("AMZN", "20170105", 3018, 3086)
]
num_rows = len(value)
chunk_dtype = np.dtype([("symbol", "S4"), ("date", "S8"), ("open", "i4"), ("close", "i4")])
chunk_arr = np.zeros(chunk_layout, dtype=chunk_dtype)
for i in range(num_rows):
row = value[i]
e = chunk_arr[i]
for j in range(4):
e[j] = row[j]
#chunkQuery(chunk_id=None, chunk_arr=None, slices=None, query=None, query_update=None, limit=0, return_json=False):
result = chunkQuery(chunk_id=chunk_id, chunk_layout=chunk_layout, chunk_arr=chunk_arr, query="symbol == b'AAPL'")
self.assertTrue(isinstance(result, np.ndarray))
result_dtype = result.dtype
self.assertEqual(len(result_dtype), 2)
self.assertEqual(result_dtype[0], np.dtype("u8"))
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 2 18:02:17 2016
@author: denis
"""
from math import pi
from itertools import islice
import numpy as np
import pandas as pd
import copy
import matplotlib.pyplot as plt
from pytrx.utils import z_str2num, z_num2str
import pkg_resources
from pytrx import hydro
from pytrx.transformation import Transformation
# from pytrx import transformation
from numba import njit, prange
from mpl_toolkits.mplot3d import Axes3D
class Molecule:
def __init__(self, Z, xyz,
calc_gr=False, rmin=0, rmax=25, dr=0.01,
associated_transformation=None, printing=True):
'''
associated_transformation will be either a transformation class or
a list of transformations
'''
if type(Z) == str:
Z = np.array([Z])
self.Z = Z
self.Z_num = np.array([z_str2num(z) for z in Z])
self.xyz = xyz.copy()
self.xyz_ref = xyz.copy()
self.printing = printing
self.reparameterized = False
# print(type(associated_transformation), Transformation)
print("Running initial check up for associated_transformation")
if associated_transformation is None:
self._associated_transformation = None
elif type(associated_transformation) == list:
if self.printing: print("associated_transformation is a list. Examining elements...")
for t in associated_transformation:
if self.printing: print(f'Checking {t}')
assert issubclass(type(t), Transformation), 'List element is not a Transformation class'
self._associated_transformation = associated_transformation
elif issubclass(type(associated_transformation), Transformation):
self._associated_transformation = [associated_transformation]
else:
raise TypeError('Supplied transformations must be None, a transformation class, or a list of it')
# self.dispersed
# self.dispersed = any([t.dw for t in self._associated_transformation])
#
self._t_keys = [] # list of transformation names - for internal use
self.par0 = {}
self.dispersed = False
if self._associated_transformation is not None:
for t in self._associated_transformation:
t.prepare(self.xyz, self.Z_num)
self._t_keys.append(t.name)
self.par0[t.name] = t.amplitude0
if t.dw:
self.dispersed = True
for key, value in zip(t.dw.suffix, t.dw.standard_value):
self.par0[t.name + key] = value
self.n_par = len(self.par0.keys())
if calc_gr: self.calcGR(rmin=rmin, rmax=rmax, dr=dr)
def calcDistMat(self, return_mat=False):
self.dist_mat = np.sqrt(np.sum((self.xyz[None, :, :] -
self.xyz[:, None, :]) ** 2, axis=2))
if return_mat: return self.dist_mat
def calcGR(self, rmin=0, rmax=25, dr=0.01):
self.calcDistMat()
self.gr = GR(self.Z, rmin=rmin, rmax=rmax, dr=dr)
self.r = self.gr.r
for pair in self.gr.el_pairs:
el1, el2 = pair
idx1, idx2 = (el1 == self.Z, el2 == self.Z)
self.gr[pair] += np.histogram(self.dist_mat[np.ix_(idx1, idx2)].ravel(),
self.gr.r_bins)[0]
def reset_xyz(self):
self.xyz = self.xyz_ref.copy() # as a numpy array we can just use the array's method
def transform(self, par=None, return_xyz=False):
'''
Transforms xyz based on the transformations supplied in _associated_transformation.
par should be either None or a dict keyed by transformation name, giving the
amplitude to apply for each transformation. When both par and the transformations
are present, the coordinates are reset to the reference geometry before the
transformations are applied in order.
'''
if (par is not None) and (self._associated_transformation is not None):
# Resets the coordinate set to be transformed
# self.xyz = copy.deepcopy(self.xyz_ref)
self.reset_xyz()
# assert (len(par.keys()) == len(self._associated_transformation)), \
# "Number of parameters not matching number of transformations"
for t in self._associated_transformation:
self.xyz = t.transform(self.xyz, self.Z_num, par[t.name])
if return_xyz:
return self.xyz
def s(self, q, pars=None):
if not hasattr(self, '_atomic_formfactors'):
self._atomic_formfactors = formFactor(q, self.Z)
if pars is None:
pars = self.par0
else:
# print(pars)
# print(self.par0.keys())
assert all([key in pars.keys() for key in self.par0.keys()]), \
'the input parameter dict does not contain all necessary parameter keys'
if self.reparameterized:
pars = self.convert(pars)
if not self.dispersed:
self.transform(pars)
return Debye(q, self, f=self._atomic_formfactors)
else:
pd = []
wd = []
for t in self._associated_transformation:
if t.dw:
_p, _w = t.dw.disperse(pars, t.name)
else:
_p, _w = pars[t.name], 1
pd.append(_p)
wd.append(_w)
pd_grid = [i.ravel() for i in np.meshgrid(*pd)]
wd_grid = [i.ravel() for i in np.meshgrid(*wd)]
n = len(pd_grid[0]) # number of combinations
# _bla = 0
_s = np.zeros(q.shape)
for i in range(n):
_p_dict = {}
_w = 1
for j, key in enumerate(self._t_keys):
_p_dict[key] = pd_grid[j][i]
_w *= wd_grid[j][i]
self.transform(_p_dict)
_s += _w * Debye(q, self, f=self._atomic_formfactors)
return _s
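# For dispersed transformations, s(q) is a weighted sum of Debye patterns over
# the Cartesian grid of dispersed parameter values, with the weights supplied
# by each transformation's dispersion model (t.dw).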
def clash(self):
# Check for clash by whether min distances between two atom types are shorter than 80 % of original (tentative)
pass
def write_xyz(self, fname):
# Write the xyz (NOT xyz_ref) to an xyz file
with open(fname, 'w') as f:
f.write(f'{len(self.Z)}')
f.write(f'\nOutput of xyz for molecule\n')
for i in range(len(self.Z)):
f.write(f'{self.Z[i]} {self.xyz[i][0]} {self.xyz[i][1]} {self.xyz[i][2]}\n')
f.write('\n')
# def sum_parameters(self):
# if self._associated_transformation is not None:
# return len(self._associated_transformation)
def calcDens(self):
self.gr.calcDens()
self.dens = self.gr.dens
def reparameterize(self, par_new, roi_dict, n=11, plotting=False):
if self.dispersed:
raise ValueError('dispersed transformations are incompatible with reparameterization')
assert self.n_par == len(par_new), 'number of new parameters must match the number of original parameters'
self._pc = ParameterConverter(self, par_new)
self._pc.define_conversion(roi_dict, n, plotting=plotting)
self.reparameterized = True
# re-"brand" the parameters:
self.reset_xyz()
self.par0 = self._pc.compute_pars(return_type=dict)
self._t_keys = list(self.par0.keys())
def convert(self, x):
return self._pc.convert(x)
# x_ar = np.array([x[key] for key in x.keys()])
# x_ar = np.hstack((x_ar, [1]))
# # print(x_ar, self.R.shape)
#
# y_out = x_ar @ self._pc.R
# return dict(zip([t.name for t in self._associated_transformation], y_out))
class ParameterConverter:
def __init__(self, molecule, pars):
self.mol = molecule
self.pars = pars # parameters to which we reparameterize
self.t_labels = list(self.mol.par0.keys())
self.R = None
def compute_pars(self, return_type=list):
out = []
for p in self.pars:
if p['type'] == 'distance':
idx1, idx2 = p['group1'], p['group2']
xyz1 = np.mean(self.mol.xyz[idx1, :], axis=0)
xyz2 = np.mean(self.mol.xyz[idx2, :], axis=0)
r = np.linalg.norm(xyz1 - xyz2)
out.append(r)
elif p['type'] == 'angle':
idx1, idx2 = p['group1'], p['group2']
n1 = self._get_normal(self.mol.xyz[idx1, :])
n2 = self._get_normal(self.mol.xyz[idx2, :])
phi = np.arccos(np.sum(n1 * n2))
out.append(np.rad2deg(phi))
if return_type == list:
return out
elif return_type == dict:
return dict(zip([p['label'] for p in self.pars], out))
def _get_normal(self, xyz):
if len(xyz) == 2:
n = xyz[0, :] - xyz[1, :]
else:
# print(xyz)
n, _, _, _ = np.linalg.lstsq(xyz, np.ones(len(xyz)), rcond=-1)
return n / np.linalg.norm(n)
def compute_grid(self, roi, n):
roi_grid = {}
for key in roi.keys():
x1, x2 = roi[key][0], roi[key][1]
roi_grid[key] = np.linspace(x1, x2, n)
grid = np.meshgrid(*[roi_grid[key] for key in self.t_labels])
return [i.ravel() for i in grid]
def define_conversion(self, roi, n, plotting=True):
grid_out = self.compute_grid(roi, n)
# print(grid)
grid_in = []
for vals in zip(*grid_out):
_p = dict(zip(self.t_labels, vals))
self.mol.transform(_p)
out = self.compute_pars()
grid_in.append(out)
grid_in = np.array(grid_in)
grid_out = np.array(grid_out).T
grid_in = np.hstack((grid_in,
np.ones((grid_in.shape[0], 1))))
# print(grid_in.shape, grid_out.shape)
self.R, _, _, _ = np.linalg.lstsq(grid_in, grid_out, rcond=-1)
if plotting:
grid_out_pred = grid_in @ self.R
fig = plt.figure()
plt.clf()
ax = fig.gca(projection='3d')
ax.plot(grid_in[:, 0], grid_in[:, 1], grid_out[:, 0], 'k.')
ax.plot(grid_in[:, 0], grid_in[:, 1], grid_out_pred[:, 0], 'r.')
def convert(self, x):
# print('BLABLABLA')
# print(x)
x_ar = np.array([float(x[key]) for key in x.keys() if key in self.mol._t_keys])
x_ar = np.hstack((x_ar, [1]))
y_out = x_ar @ self.R
return dict(zip([t.name for t in self.mol._associated_transformation], y_out))
class GR:
def __init__(self, Z, rmin=0, rmax=25, dr=0.01, r=None, el_pairs=None):
self.Z = np.unique(Z)
if el_pairs is None:
self.el_pairs = [(z_i, z_j) for i, z_i in enumerate(self.Z) for z_j in self.Z[i:]]
else:
self.el_pairs = el_pairs
if r is None:
# self.r = np.arange(rmin, rmax+dr, dr)
self.r = np.linspace(rmin, rmax, int((rmax - rmin) / dr) + 1)
else:
self.r = r
rmin, rmax, dr = r.min(), r.max(), r[1] - r[0]
# self.r_bins = np.arange(rmin-0.5*dr, rmax+1.5*dr, dr)
print(rmin, type(rmin), dr, type(dr), rmax, type(rmax))
# self.r_bins = np.linspace(float(rmin) - 0.5 * dr, float(rmax) + 0.5 * dr,
# int((float(rmax) - float(rmin)) / dr) + 2)
self.r_bins = np.linspace(float(rmin) + 0.5 * dr, float(rmax) + 0.5 * dr,
int((float(rmax) - float(rmin)) / dr) + 1)
self.gr = {}
for pair in self.el_pairs:
self.gr[frozenset(pair)] = np.zeros(self.r.size)
def __setitem__(self, key, data):
key = frozenset(key)
self.gr[key] = data
def __getitem__(self, key):
key = frozenset(key)
return self.gr[key]
def __add__(self, gr_other):
gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
for pair in self.el_pairs:
gr_out[pair] = self[pair] + gr_other[pair]
return gr_out
def __sub__(self, gr_other):
gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
for pair in self.el_pairs:
gr_out[pair] = self[pair] - gr_other[pair]
return gr_out
def __mul__(self, factor):
gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
for pair in self.el_pairs:
gr_out[pair] = self[pair] * factor
return gr_out
def __truediv__(self, gr_other):
gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
for pair in self.el_pairs:
gr_out[pair] = self[pair] / gr_other[pair]
return gr_out
def calcDens(self):
self.dens = np.zeros(self.r.shape)
for pair in self.el_pairs:
el1, el2 = pair
z1 = z_str2num(el1)
z2 = z_str2num(el2)
self.dens += z1 * z2 * self.gr[frozenset(pair)]
def save(self, fname):
n = self.r.size
m = len(self.el_pairs)
header = 'r, ' + ', '.join([ '-'.join([i for i in pair]) for pair in self.el_pairs])
data = np.zeros((n, m + 1))
data[:, 0] = self.r
for i, pair in enumerate(self.el_pairs):
if not np.all(np.isnan(self[pair])):
data[:, i + 1] = self[pair]
np.savetxt(fname, data, delimiter=', ', header=header)
### UTILS
def formFactor(q, Elements):
'''
Calculates atomic form-factor at value q
q - np.array of scattering vector values
Elements - np.array or list of elements. May be a string if one wants to
compute form-factor for only one element.
returns a dict of form factors
Examples:
q = np.arange(10)
f = formFactor(q, 'Si')
print(f['Si'])
Elements = ['Si', 'O']
f = formFactor(q, Elements)
print(f['Si'], f['O'])
'''
Elements = np.unique(Elements)
fname = pkg_resources.resource_filename('pytrx', './f0_WaasKirf.dat')
with open(fname) as f:
content = f.readlines()
s = q / (4 * pi)
formFunc = lambda sval, a: np.sum(a[None, :5] * np.exp(-a[None, 6:] * sval[:, None] ** 2), axis=1) + a[5]
f = {}
for i, x in enumerate(content):
if x[0:2] == '#S':
atom = x.split()[-1]
if any([atom == x for x in Elements]):
coef = np.fromstring(content[i + 3], sep='\t')
f[atom] = formFunc(s, coef)
return f
def diff_cage_from_dgr(q, dgr, molecule, solvent_str, r_cut=None):
ff = formFactor(q, dgr.Z)
s = np.zeros(q.shape)
r = dgr.r
w = np.ones(r.shape)
if r_cut:
w[r > r_cut] = 0
# else:
# w = np.exp(-0.5 * (r / r_damp) ** 2)
ksi = q[:, None] * r[None, :]
ksi[ksi < 1e-9] = 1e-9
# w = np.exp(-0.5*(r/5)**2)
Asin = 4 * np.pi * (r[1] - r[0]) * (np.sin(ksi) / ksi) * r[None, :] ** 2 * w
solvent = hydro.solvent_data[solvent_str]
V = solvent.molar_mass / 6.02e23 / (solvent.density / 1e30)
for el1 in np.unique(molecule.Z):
for el2 in np.unique(solvent.Z):
el_pair = (el1, el2)
if not np.all(dgr[el_pair] == 0):
n1 = np.sum(molecule.Z == el1)
n2 = np.sum(solvent.Z == el2)
# print(el1, n1, el2, n2)
_s = ff[el1] * ff[el2] * n1 * n2 / V * (Asin @ dgr[el_pair])
s += _s
return s
def diff_cave_from_dgr(q, dgr, solvent_str, r_damp=25):
ff = formFactor(q, dgr.Z)
s = np.zeros(q.shape)
r = dgr.r
ksi = q[:, None] * r[None, :]
ksi[ksi < 1e-9] = 1e-9
# w = np.exp(-0.5*(r/5)**2)
w = np.ones(r.shape)
w[r > r_damp] = 0
Asin = 4 * np.pi * (r[1] - r[0]) * (np.sin(ksi) / ksi) * r[None, :] ** 2 * w
solvent = hydro.solvent_data[solvent_str]
V = solvent.molar_mass / 6.02e23 / (solvent.density / 1e30)
for el1 in np.unique(solvent.Z):
for el2 in np.unique(solvent.Z):
el_pair = (el1, el2)
if not np.all(dgr[el_pair] == 0):
n1 = np.sum(solvent.Z == el1)
n2 = np.sum(solvent.Z == el2)
# print(el1, n1, el2, n2)
_s = ff[el1] * ff[el2] * n1 * n2 / V * (Asin @ dgr[el_pair])
s += _s
return s
def GRfromFile(filename, delimiter=', ', normalize=False, rmin=25, rmax=30):
names = np.genfromtxt(filename, delimiter=delimiter, names=True, deletechars=',').dtype.names
data = np.genfromtxt(filename, delimiter=delimiter)
# print(data)
els = []
el_pairs = []
for name in names[1:]:
new_pair = name.split('_')
if len(new_pair) == 1:
new_pair = name.split('-')
        new_pair = [str.capitalize(i) for i in new_pair]
        el_pairs.append(new_pair)
els += new_pair
els = np.unique(els)
# print(els)
# print(el_pairs)
gr = GR(els)
r = data[1:, 0]
for i, pair in enumerate(el_pairs):
gr_array = data[1:, i + 1]
if normalize:
rsel = (r >= rmin) & (r <= rmax)
c = np.mean(gr_array[rsel])
if c != 0:
gr_array /= c
gr[pair] = gr_array
gr.r = r
return gr
def convert2rspace(q, dsq, alpha_damp=0.15, rmax=25, dr=0.01, molecule=None):
r = np.arange(0, rmax+dr, dr)
ksi = q[None, :] * r[:, None]
ksi[ksi<1e-9] = 1e-9
if molecule:
f_sharp = get_f_sharp_for_molecule(q, molecule)
f_sharp /= f_sharp.max()
else:
f_sharp = np.ones(q.shape)
w = q * np.exp( - (alpha_damp * q)**2 ) / f_sharp
# plt.figure()
# plt.plot(q, w)
A_sin = w[None, :] * np.sin(ksi)
return r, A_sin @ dsq
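# A minimal sketch of convert2rspace on synthetic data (the distance r0 and
# the Debye-like input signal below are assumptions for illustration):
def _demo_convert2rspace():
    q_ex = np.linspace(0.1, 12, 500)
    r0 = 2.5                                     # hypothetical interatomic distance
    dsq_ex = np.sin(q_ex * r0) / (q_ex * r0)     # single-distance sine term
    r_ex, rsf = convert2rspace(q_ex, dsq_ex)     # damped sine transform
    return r_ex[np.argmax(rsf)]                  # should lie near r0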
def get_f_sharp_for_molecule(q, molecule):
if hasattr(molecule, '_atomic_formfactors'):
ff = molecule._atomic_formfactors
else:
ff = formFactor(q, molecule.Z)
f_sharp = np.zeros(q.size)
for i in range(molecule.Z.size):
for j in range(i + 1, molecule.Z.size):
z_i = molecule.Z[i]
z_j = molecule.Z[j]
f_sharp += 2 * ff[z_i] * ff[z_j]
return f_sharp
def Debye(q, mol, f=None, atomOnly=False, debug=False):
mol.calcDistMat()
natoms = mol.Z.size
if f is None:
f = formFactor(q, mol.Z)
if debug:
print(f)
Scoh = np.zeros(q.shape)
FFtable = np.zeros((natoms, len(q)))
for idx in range(natoms):
FFtable[idx] = f[mol.Z[idx]]
if atomOnly:
Scoh = np.zeros(q.shape)
for idx1 in range(natoms):
Scoh += f[mol.Z[idx1]] ** 2
else:
Scoh = Scoh_calc2(FFtable, q, mol.dist_mat, natoms)
if debug:
print(Scoh)
return Scoh
@njit
def Scoh_calc(FF, q, r, natoms):
Scoh = np.zeros(q.shape)
for idx1 in range(natoms):
for idx2 in range(idx1 + 1, natoms):
r12 = r[idx1, idx2]
qr12 = q * r12
Scoh += 2 * FF[idx1] * FF[idx2] * np.sin(qr12) / qr12
Scoh += FF[idx1] ** 2
return Scoh
@njit(parallel=True)
def Scoh_calc2(FF, q, r, natoms):
# Scoh = np.zeros(q.shape)
Scoh2 = np.zeros((natoms, len(q)))
for idx1 in prange(natoms):
Scoh2[idx1] += FF[idx1] ** 2
for idx2 in range(idx1 + 1, natoms):
r12 = r[idx1, idx2]
qr12 = q * r12
qr12[qr12<1e-9] = 1e-9
Scoh2[idx1] += 2 * FF[idx1] * FF[idx2] * np.sin(qr12) / qr12
return np.sum(Scoh2, axis=0)
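# Consistency sketch: the serial and parallel Debye kernels should agree to
# floating-point precision (the random geometry and flat form factors below
# are placeholders, not physical values):
def _demo_scoh_kernels():
    rng = np.random.default_rng(0)
    natoms = 4
    q_ex = np.linspace(0.1, 8, 50)
    xyz = rng.random((natoms, 3)) * 5
    dmat = np.sqrt(((xyz[:, None, :] - xyz[None, :, :]) ** 2).sum(-1))
    FF = np.ones((natoms, q_ex.size))
    return np.allclose(Scoh_calc(FF, q_ex, dmat, natoms),
                       Scoh_calc2(FF, q_ex, dmat, natoms))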
def DebyeFromGR(q, gr, f=None, rmax=None, cage=False):
if f is None:
f = formFactor(q, gr.Z)
if rmax is None: rmax = gr.r.max()
Scoh = np.zeros(q.shape)
rsel = gr.r < rmax
qr = q[:, None] * gr.r[None, rsel]
qr[qr < 1e-6] = 1e-6
Asin = np.sin(qr) / qr
for pair in gr.el_pairs:
el1, el2 = pair
# print(Asin.shape, gr[pair].shape)
pair_scat = f[el1] * f[el2] * (Asin @ gr[pair][rsel])
if el1 == el2:
if cage:
Scoh += 2 * pair_scat
else:
Scoh += pair_scat
else:
Scoh += 2 * pair_scat
return Scoh
def ScatFromDens(q, gr):
gr.calcDens()
qr = q[:, None] * gr.r[None, :]
qr[qr < 1e-6] = 1e-6
Asin = np.sin(qr) / qr
return Asin @ gr.dens
def Compton(z, q):
fname_lowz = pkg_resources.resource_filename('pytrx', './Compton_lowZ.dat')
fname_highz = pkg_resources.resource_filename('pytrx', './Compton_highZ.dat')
data_lowz = pd.read_csv(fname_lowz, sep='\t')
data_highz = pd.read_csv(fname_highz, sep='\t')
data_lowz['Z'] = data_lowz['Z'].apply(lambda x: z_num2str(x))
data_highz['Z'] = data_highz['Z'].apply(lambda x: z_num2str(x))
Scoh = formFactor(q, z)[z] ** 2
z_num = z_str2num(z)
if z in data_lowz['Z'].values:
M, K, L = data_lowz[data_lowz['Z'] == z].values[0, 1:4]
        S_inc = (z_num - Scoh / z_num) * (1 - M * (np.exp(-K * q / (4 * pi)) - np.exp(-L * q / (4 * pi))))
import abc
import logging
import numpy as np
from sklearn import svm as sklearn_svm
from sklearn import calibration as sklearn_clb
import pickle
from joblib import Parallel, delayed
logger = logging.getLogger(__name__)
PRIMITIVE_TYPES = ['exsvms']
class IPrimitiveCollection(metaclass=abc.ABCMeta):
def __init__(self, prim_type):
self.name = prim_type
# Placeholders
# array of ids
self.prim_ids = None
# array of rprs
self.prim_rprs = None
# array of calibrated classifiers
self.prim_cls = None
@abc.abstractmethod
def learn(self, images_path, labels, feat_ext, **kwargs):
raise NotImplementedError()
def get_rpr(self, prim_ids):
return self.prim_rprs[np.where(self.prim_ids == prim_ids)[0][0]]
def get_ids(self):
return self.prim_ids
def get_cls(self, prim_ids):
return self.prim_cls[np.where(self.prim_ids == prim_ids)[0][0]]
def save(self, file_path):
with open(file_path, 'wb') as file:
pickle.dump(self, file, protocol=pickle.HIGHEST_PROTOCOL)
def load(self, file_path):
with open(file_path, 'rb') as file:
obj = pickle.load(file)
self.prim_ids = obj.prim_ids
self.prim_rprs = obj.prim_rprs
self.prim_cls = obj.prim_cls
@staticmethod
def factory(prim_type, **kwargs):
if prim_type == PRIMITIVE_TYPES[0]:
            return SVMPrimitives(prim_type, **kwargs)
else:
raise ValueError("Primitives of type {} is not supported. Try {}.".format(prim_type, PRIMITIVE_TYPES))
class SVMPrimitives(IPrimitiveCollection):
def __init__(self, prim_type, **kwargs):
super().__init__(prim_type)
def learn(self, images_path, labels, feat_ext, **kwargs):
# setup parameters
num_threads = kwargs.get('num_threads', 10)
ex_size = kwargs.get('ex_size', 10)
num_ex = kwargs.get('num_ex', 10)
prim_ids = kwargs.get('prim_ids', np.arange(labels.shape[1]))
# compute features and train models
feats = feat_ext.compute(images_path)
models = Parallel(n_jobs=num_threads)(
delayed(_train_svm)(feats, labels[:, prim_id], prim_id, ex_size, num_ex) for prim_id in prim_ids)
# Post Processing
self.prim_ids = np.array(prim_ids)
self.prim_rprs, self.prim_cls = [], []
for idx, prim_id in enumerate(self.prim_ids):
self.prim_rprs.append(np.vstack([np.hstack((svm_object.intercept_.ravel(), svm_object.coef_.ravel())) for svm_object in models[idx][0]]))
self.prim_cls.append(models[idx][1])
self.prim_rprs = np.array(self.prim_rprs)
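# Hypothetical end-to-end sketch (the feature extractor, data arrays, and the
# output path 'primitives.pkl' are placeholders; see learn() for the kwargs):
def _demo_primitives(images_path, labels, feat_ext):
    prims = IPrimitiveCollection.factory(PRIMITIVE_TYPES[0])
    prims.learn(images_path, labels, feat_ext, num_threads=4, num_ex=5)
    prims.save('primitives.pkl')
    return prims.get_ids()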
def _train_svm(feats, labels, prim_id, ex_size, num_ex):
logger.info("Training Primitive {}.".format(prim_id))
# split examplars
pos_img_ids = np.where(labels)[0]
pos_img_splits = [pos_img_ids] if num_ex == 1 else [pos_img_ids] + [
np.random.choice(pos_img_ids, size=min(ex_size, pos_img_ids.size), replace=False) for _ in range(num_ex)]
logger.info("Primitive {} has {} exemplars.".format(prim_id, len(pos_img_splits)))
svms, clbs = [], []
for ex_id, pos_ex_ids in enumerate(pos_img_splits):
if len(pos_ex_ids) > 0:
logger.info("Primitive {} training exemplar {} ...".format(prim_id, ex_id))
svm_object = sklearn_svm.LinearSVC(C=1e-3, class_weight={1: 2, -1: 1.0}, verbose=0, penalty='l2',
loss='hinge', dual=True)
neg_ex_ids = np.array([idx for idx in range(labels.size) if idx not in pos_ex_ids])
X = np.vstack([feats[pos_ex_ids], feats[neg_ex_ids]])
Y = np.hstack([np.ones(pos_ex_ids.size), -1.0 * np.ones(neg_ex_ids.size)])
svm_object.fit(X, Y)
train_acc = svm_object.score(X, Y)
svms.append(svm_object)
logger.info("SVM (Primitive {} examplar {}) has {} positives, {} negatives and accuracy {}."
.format(prim_id, ex_id, pos_ex_ids.size, neg_ex_ids.size, train_acc))
if ex_id == 0:
svm_object_clb = sklearn_svm.LinearSVC(C=1e-3, class_weight={1: 2, -1: 1.0}, verbose=0, penalty='l2',
loss='hinge', dual=True)
np.random.shuffle(pos_ex_ids)
                np.random.shuffle(neg_ex_ids)
#! /usr/bin/env python
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1 as axtk
from scipy.sparse import lil_matrix, csc_matrix, hstack
import abc
from . import shared_tools
class iteration_tools(abc.ABC):
"""Tools relating to the updating of the model and model I/O.
Tools defined in this class include steps to iterate for one timestep,
    finalize timesteps, and save output figures, grids, and checkpoints.
Additionally, most stratigraphy-related operations are defined here, since
these operations largely occur when saving and updating the model.
"""
def solve_water_and_sediment_timestep(self):
"""Run water and sediment operations for one timestep.
The first operation called by :meth:`update`, this method iterates the
water surface calculation and sediment parcel routing routines.
Parameters
----------
Returns
-------
"""
# start the model operations
self.eta0 = np.copy(self.eta) # copy
# water iterations
self.hook_route_water()
self.route_water()
self.hook_after_route_water()
# sediment iteration
self.hook_route_sediment()
self.route_sediment()
self.hook_after_route_sediment()
def run_one_timestep(self):
"""Deprecated, since v1.3.1. Use :obj:`solve_water_and_sediment_timestep`."""
_msg = ('`run_one_timestep` and `hook_run_one_timestep` are '
'deprecated and have been replaced with '
'`solve_water_and_sediment_timestep`. '
'Running `solve_water_and_sediment_timestep` now, but '
'this will be removed in future release.')
self.logger.warning(_msg)
warnings.warn(UserWarning(_msg))
self.solve_water_and_sediment_timestep()
def apply_subsidence(self):
"""Apply subsidence pattern.
Apply subsidence to domain if toggle_subsidence is True, and
:obj:`~pyDeltaRCM.DeltaModel.time` is ``>=``
        :obj:`~pyDeltaRCM.DeltaModel.start_subsidence`. Note that the
configuration of the :obj:`~pyDeltaRCM.DeltaModel.update()` method
determines that the subsidence may be applied before the model time
is incremented, such that subsidence will begin on the step
*following* the time step that brings the model to ``time ==
start_subsidence``.
Parameters
----------
Returns
-------
"""
if self._toggle_subsidence:
if self._time >= self._start_subsidence:
_msg = 'Applying subsidence'
self.log_info(_msg, verbosity=1)
self.eta[:] = self.eta - self.sigma
def finalize_timestep(self):
"""Finalize timestep.
Clean up after sediment routing. This includes a correction for
flooded cells that are not "wet" (via :meth:`flooding_correction`).
Update sea level if baselevel changes between timesteps.
Parameters
----------
Returns
-------
"""
_msg = 'Finalizing timestep'
self.log_info(_msg, verbosity=2)
self.flooding_correction()
self.stage[:] = np.maximum(self.stage, self._H_SL)
self.depth[:] = np.maximum(self.stage - self.eta, 0)
self.eta[0, self.inlet] = self.stage[0, self.inlet] - self._h0
self.depth[0, self.inlet] = self._h0
self.hook_compute_sand_frac()
self.compute_sand_frac()
self.H_SL = self._H_SL + self._SLR * self._dt
def log_info(self, message, verbosity=0):
"""Log message dependent on verbosity settings.
Parameters
----------
message : :obj:`str`
Message string to write to the log as info.
verbosity : :obj:`int`, optional
Verbosity threshold, whether to write the message to the log or
not. Default value is `0`, or i.e. to always log.
"""
if self._verbose >= verbosity:
self.logger.info(message)
def log_model_time(self):
"""Log the time of the model.
Reports the time to the log file, and depending on verbosity, will
report it to stdout.
"""
_timemsg = 'Time: {time:.{digits}f}; timestep: {timestep:g}'.format(
time=self._time, timestep=self._time_iter, digits=1)
self.logger.info(_timemsg)
if self._verbose > 0:
print(_timemsg)
def output_data(self):
"""Output grids and figures if needed.
"""
if self._save_time_since_data >= self.save_dt:
self.save_grids_and_figs()
self._save_iter += int(1)
self._save_time_since_data = 0
def output_checkpoint(self):
"""Output checkpoint if needed.
Save checkpoint data (including rng state) so that the model can be
resumed from this time.
Parameters
----------
Returns
-------
"""
if self._save_time_since_checkpoint >= self.checkpoint_dt:
if self._save_checkpoint:
_msg = 'Saving checkpoint'
self.log_info(_msg, verbosity=1)
self.save_the_checkpoint()
if self._checkpoint_dt != self._save_dt:
_msg = ('Grid save interval and checkpoint interval are '
'not identical, this may result in duplicate '
'entries in the output NetCDF4 after resuming '
'the model run.')
self.logger.warning(_msg)
self._save_time_since_checkpoint = 0
def compute_sand_frac(self):
"""Compute the sand fraction as a continous updating data field.
Parameters
----------
Returns
-------
"""
_msg = 'Computing bed sand fraction'
self.log_info(_msg, verbosity=2)
# layer attributes at time t
actlyr_thick = self._active_layer_thickness
actlyr_top = np.copy(self.eta0)
actlyr_bot = actlyr_top - actlyr_thick
deta = self.eta - self.eta0
# everywhere the bed has degraded this timestep
whr_deg = (deta < 0)
        if np.any(whr_deg):
import numpy as np
# Polygon perimeter
# shape of polygon: [N, 2]
def Perimeter(polygon: np.array):
N, d = polygon.shape
if N < 3 or d != 2:
raise ValueError
permeter = 0.
for i in range(N):
permeter += np.linalg.norm(polygon[i-1] - polygon[i])
return permeter
# Polygon area via fan triangulation from vertex 0 (exact for convex polygons)
def Area(polygon: np.array):
N, d = polygon.shape
if N < 3 or d != 2:
raise ValueError
area = 0.
vector_1 = polygon[1] - polygon[0]
for i in range(2, N):
vector_2 = polygon[i] - polygon[0]
area += np.abs(np.cross(vector_1, vector_2))
vector_1 = vector_2
return area / 2
# |r| < 1
# r > 0: shrink inward
# r < 0: expand outward
def calc_shrink_width(polygon: np.array, r):
area = Area(polygon)
perimeter = Perimeter(polygon)
L = area * (1 - r ** 2) / perimeter
return L if r > 0 else -L
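# Sanity sketch on a unit square (counter-clockwise vertices; values chosen
# so the expected results are easy to verify by hand):
def _demo_polygon_metrics():
    square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    assert np.isclose(Perimeter(square), 4.0)
    assert np.isclose(Area(square), 1.0)
    return calc_shrink_width(square, 0.5)  # = 1 * (1 - 0.25) / 4 = 0.1875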
def shrink_polygon(polygon: np.array, r):
N, d = polygon.shape
if N < 3 or d != 2:
raise ValueError
shrinked_polygon = []
L = calc_shrink_width(polygon, r)
for i in range(N):
Pi = polygon[i]
v1 = polygon[i-1] - Pi
v2 = polygon[(i+1)%N] - Pi
        normalize_v1 = v1 / np.linalg.norm(v1)
import numpy as np
from scipy.stats import multivariate_normal
from scipy.special import logsumexp
from sklearn import cluster
from sklearn.utils import check_array, check_random_state
from . import hsmm_core_x as core, hsmm_utils
from .hsmm_utils import log_mask_zero, iter_from_X_lengths
# Base Class for Explicit Duration HSMM
class HSMM:
def __init__(self, n_states=2, n_durations=5, n_iter=20, tol=1e-2, rnd_state=None):
if not n_states >= 2:
raise ValueError("number of states (n_states) must be at least 2")
if not n_durations >= 1:
raise ValueError("number of durations (n_durations) must be at least 1")
self.n_states = n_states
self.n_durations = n_durations
self.n_iter = n_iter
self.tol = tol
self.rnd_state = rnd_state
# _init: initializes model parameters if there are none yet
def _init(self):
if not hasattr(self, "pi"):
self.pi = np.full(self.n_states, 1.0 / self.n_states)
if not hasattr(self, "tmat"):
self.tmat = np.full((self.n_states, self.n_states), 1.0 / (self.n_states - 1))
for i in range(self.n_states):
self.tmat[i, i] = 0.0 # no self-transitions in EDHSMM
self._dur_init() # duration
# _check: check if properties of model parameters are satisfied
def _check(self):
# starting probabilities
self.pi = np.asarray(self.pi)
if self.pi.shape != (self.n_states, ):
raise ValueError("start probabilities (self.pi) must have shape ({},)".format(self.n_states))
if not np.allclose(self.pi.sum(), 1.0):
raise ValueError("start probabilities (self.pi) must add up to 1.0")
# transition probabilities
self.tmat = np.asarray(self.tmat)
if self.tmat.shape != (self.n_states, self.n_states):
raise ValueError("transition matrix (self.tmat) must have shape ({0}, {0})".format(self.n_states))
if not np.allclose(self.tmat.sum(axis=1), 1.0):
raise ValueError("transition matrix (self.tmat) must add up to 1.0")
for i in range(self.n_states):
if self.tmat[i, i] != 0.0: # check for diagonals
raise ValueError("transition matrix (self.tmat) must have all diagonals equal to 0.0")
# duration probabilities
self._dur_check()
# _dur_init: initializes duration parameters if there are none yet
def _dur_init(self):
"""
arguments: (self)
return: None
> initialize the duration parameters
"""
pass # implemented in subclass
# _dur_check: checks if properties of duration parameters are satisfied
def _dur_check(self):
"""
arguments: (self)
return: None
> check the duration parameters
"""
pass # implemented in subclass
# _dur_probmat: compute the probability per state of each duration
def _dur_probmat(self):
"""
arguments: (self)
return: duration probability matrix
"""
pass # implemented in subclass
# _dur_mstep: perform m-step for duration parameters
def _dur_mstep(self):
"""
arguments: (self, new_dur)
return: None
> compute the duration parameters
"""
pass # implemented in subclass
# _emission_logprob: compute the log-likelihood per state of each observation
def _emission_logprob(self):
"""
arguments: (self, X)
return: logframe
"""
pass # implemented in subclass
# _emission_pre_mstep: prepare m-step for emission parameters
def _emission_pre_mstep(self):
"""
arguments: (self, gamma, emission_var)
return: None
> process gamma and save output to emission_var
"""
pass # implemented in subclass
# _emission_mstep: perform m-step for emission parameters
def _emission_mstep(self):
"""
arguments: (self, X, emission_var)
return: None
> compute the emission parameters
"""
pass # implemented in subclass
# _state_sample: generate 'observation' for given state
def _state_sample(self):
"""
arguments: (self, state, rnd_state=None)
return: np.ndarray of length equal to dimension of observation
> generate sample from state
"""
pass # implemented in subclass
# sample: generate random observation series
def sample(self, n_samples=5, censoring=1, rnd_state=None):
self._init(None) # see "note for programmers" in init() in GaussianHSMM
self._check()
# setup random state
if rnd_state is None:
rnd_state = self.rnd_state
rnd_checked = check_random_state(rnd_state)
# adapted from hmmlearn 0.2.3 (see _BaseHMM.score function)
pi_cdf = np.cumsum(self.pi)
tmat_cdf = np.cumsum(self.tmat, axis=1)
dur_cdf = np.cumsum(self._dur_probmat(), axis=1)
# for first state
currstate = (pi_cdf > rnd_checked.rand()).argmax() # argmax() returns only the first occurrence
currdur = (dur_cdf[currstate] > rnd_checked.rand()).argmax() + 1
if censoring == 0 and currdur > n_samples:
print("SAMPLE: n_samples is too small to contain the first state duration.")
return None
state_sequence = [currstate] * currdur
X = [self._state_sample(currstate, rnd_checked) for i in range(currdur)] # generate 'observation'
ctr_sample = currdur
# for next state transitions
while ctr_sample < n_samples:
currstate = (tmat_cdf[currstate] > rnd_checked.rand()).argmax()
currdur = (dur_cdf[currstate] > rnd_checked.rand()).argmax() + 1
# test if now in the end of generating samples
if ctr_sample + currdur > n_samples:
if censoring == 0:
break # if without right censoring, do not include exceeding state duration
else:
currdur = n_samples - ctr_sample # if with right censoring, cap the samples to n_samples
state_sequence += [currstate] * currdur
X += [self._state_sample(currstate, rnd_checked) for i in range(currdur)] # generate 'observation'
ctr_sample += currdur
return ctr_sample, np.atleast_2d(X), np.array(state_sequence, dtype=int)
# _core_u_only: container for core._u_only (for multiple observation sequences)
def _core_u_only(self, logframe):
n_samples = logframe.shape[0]
u = np.empty((n_samples, self.n_states, self.n_durations))
core._u_only(n_samples, self.n_states, self.n_durations,
logframe, u)
return u
# _core_forward: container for core._forward (for multiple observation sequences)
def _core_forward(self, u, logdur, censoring):
n_samples = u.shape[0]
if censoring == 0: # without right censoring
eta = np.empty((n_samples, self.n_states, self.n_durations))
else: # with right censoring
eta = np.empty((n_samples + self.n_durations - 1, self.n_states, self.n_durations))
xi = np.empty((n_samples, self.n_states, self.n_states))
core._forward(n_samples, self.n_states, self.n_durations,
log_mask_zero(self.pi),
log_mask_zero(self.tmat),
logdur, censoring, eta, u, xi)
return eta, xi
# _core_backward: container for core._backward (for multiple observation sequences)
def _core_backward(self, u, logdur, censoring):
n_samples = u.shape[0]
beta = np.empty((n_samples, self.n_states))
betastar = np.empty((n_samples, self.n_states))
core._backward(n_samples, self.n_states, self.n_durations,
log_mask_zero(self.pi),
log_mask_zero(self.tmat),
logdur, censoring, beta, u, betastar)
return beta, betastar
# _core_smoothed: container for core._smoothed (for multiple observation sequences)
def _core_smoothed(self, beta, betastar, censoring, eta, xi):
n_samples = beta.shape[0]
gamma = np.empty((n_samples, self.n_states))
core._smoothed(n_samples, self.n_states, self.n_durations,
beta, betastar, censoring, eta, xi, gamma)
return gamma
# _core_viterbi: container for core._viterbi (for multiple observation sequences)
def _core_viterbi(self, u, logdur, censoring):
n_samples = u.shape[0]
state_sequence, log_prob = core._viterbi(n_samples, self.n_states, self.n_durations,
log_mask_zero(self.pi),
log_mask_zero(self.tmat),
logdur, censoring, u)
return state_sequence, log_prob
# score: log-likelihood computation from observation series
def score(self, X, lengths=None, censoring=1):
X = check_array(X)
self._init(X)
self._check()
logdur = log_mask_zero(self._dur_probmat()) # build logdur
# main computations
log_prob = 0
for i, j in iter_from_X_lengths(X, lengths):
logframe = self._emission_logprob(X[i:j]) # build logframe
u = self._core_u_only(logframe)
_, betastar = self._core_backward(u, logdur, censoring)
gammazero = log_mask_zero(self.pi) + betastar[0]
log_prob += logsumexp(gammazero)
return log_prob
# predict: hidden state & duration estimation from observation series
def predict(self, X, lengths=None, censoring=1):
X = check_array(X)
self._init(X)
self._check()
logdur = log_mask_zero(self._dur_probmat()) # build logdur
# main computations
log_prob = 0
state_sequence = np.empty(X.shape[0], dtype=int) # total n_samples = X.shape[0]
for i, j in iter_from_X_lengths(X, lengths):
logframe = self._emission_logprob(X[i:j]) # build logframe
u = self._core_u_only(logframe)
iter_state_sequence, iter_log_prob = self._core_viterbi(u, logdur, censoring)
log_prob += iter_log_prob
state_sequence[i:j] = iter_state_sequence
return state_sequence, log_prob
# fit: parameter estimation from observation series
def fit(self, X, lengths=None, censoring=1):
X = check_array(X)
self._init(X)
self._check()
# main computations
for itera in range(self.n_iter):
score = 0
pi_num = np.full(self.n_states, -np.inf)
tmat_num = dur_num = -np.inf
emission_var = [None] # see "note for programmers" in _emission_pre_mstep() in GaussianHSMM
logdur = log_mask_zero(self._dur_probmat()) # build logdur
for i, j in iter_from_X_lengths(X, lengths):
logframe = self._emission_logprob(X[i:j]) # build logframe
u = self._core_u_only(logframe)
eta, xi = self._core_forward(u, logdur, censoring)
beta, betastar = self._core_backward(u, logdur, censoring)
gamma = self._core_smoothed(beta, betastar, censoring, eta, xi)
score += logsumexp(gamma[0, :]) # this is the output of 'score' function
# preparation for reestimation / M-step
# this will make fit() slower than the previous version :(
xi.resize(j - i + 1, self.n_states, self.n_states)
eta.resize(j - i + 1, self.n_states, self.n_durations)
xi[j - i] = tmat_num
eta[j - i] = dur_num
pi_num = logsumexp([pi_num, gamma[0]], axis=0)
tmat_num = logsumexp(xi, axis=0)
dur_num = logsumexp(eta, axis=0)
self._emission_pre_mstep(gamma, emission_var)
# check for loop break
if itera > 0 and (score - old_score) < self.tol:
print("FIT: converged at {}th loop.".format(itera + 1))
break
else:
old_score = score
# reestimation / M-step
self.pi = np.exp(pi_num - logsumexp(pi_num))
self.tmat = np.exp(tmat_num - logsumexp(tmat_num, axis=1)[None].T)
new_dur = np.exp(dur_num - logsumexp(dur_num, axis=1)[None].T)
self._dur_mstep(new_dur) # new durations
self._emission_mstep(X, emission_var[0]) # new emissions
print("FIT: reestimation complete for {}th loop.".format(itera + 1))
# Sample Subclass: Explicit Duration HSMM with Gaussian Emissions
class GaussianHSMM(HSMM):
def __init__(self, n_states=2, n_durations=5, n_iter=20, tol=1e-2, rnd_state=None):
super().__init__(n_states, n_durations, n_iter, tol, rnd_state)
def _init(self, X):
super()._init()
# note for programmers: for every attribute that needs X in score()/predict()/fit(),
# there must be a condition 'if X is None' because sample() doesn't need an X, but
# default attribute values must be initiated for sample() to proceed.
if True: # always change self.n_dim
if X is None: # default for sample()
self.n_dim = 1
else:
self.n_dim = X.shape[1]
if not hasattr(self, "mean"):
if X is None: # default for sample()
# self.mean = [[0.], [1.], [2.], ...]
                self.mean = np.arange(0., self.n_states)[:, None]
import os
import sys
import socket
import numpy as np
import h5py
import scipy
from scipy.io import loadmat
sys.path.append('./latent_3d_points_py3/')
from latent_3d_points_py3.src import in_out
from latent_3d_points_py3.src.general_utils import plot_3d_point_cloud
from functools import partial
import tqdm
import tensorflow as tf
import tensorflow.math as tm
import multiprocessing
import torch
# from numpy import pi, cos, sin, arccos, arange
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Download dataset for point cloud classification
DATA_DIR = os.path.join(BASE_DIR, 'data')
if socket.gethostname() == 'tianxing-GS73-7RE':
SHAPENET_DIR = '/media/tianxing/Data/datasets/shape_net_core_uniform_samples_2048/'
else:
SHAPENET_DIR = './data/shape_net_core_uniform_samples_2048/'
scratch_shapenet_dir = '/scratch/shape_net_core_uniform_samples_2048'
if os.path.exists(scratch_shapenet_dir):
SHAPENET_DIR = scratch_shapenet_dir
print(f'Loading shapenet from {SHAPENET_DIR}')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def get_shapenet_data():
labels_lst = list(in_out.snc_category_to_synth_id().keys())
data = []
labels = []
for label in tqdm.tqdm(labels_lst, desc='loading data'):
syn_id = in_out.snc_category_to_synth_id()[label]
class_dir = os.path.join(SHAPENET_DIR , syn_id)
pc = in_out.load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='*.ply', verbose=False)
cur_data, _, _ = pc.full_epoch_data(shuffle=False)
data.append(cur_data)
labels.append([labels_lst.index(label)] * cur_data.shape[0])
current_data = np.concatenate(data, axis=0)
current_label = np.concatenate(labels, axis=0)
print(current_data.shape)
print(current_label.shape)
current_data, current_label, _ = shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
return current_data, current_label
def shuffle_data(data, labels):
""" Shuffle data and labels.
Input:
data: B,N,... numpy array
label: B,... numpy array
Return:
shuffled data, label and shuffle indices
"""
idx = np.arange(len(labels))
np.random.shuffle(idx)
return data[idx, ...], labels[idx], idx
def rotate_point_cloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
Bx1 array, rotated angle for all point clouds
"""
rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
angles = np.zeros(batch_data.shape, dtype=np.float32)
for k in range(batch_data.shape[0]):
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
from datetime import date, datetime
from functools import reduce
from typing import Any, Union
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import polars as pl
from polars import testing
from polars.datatypes import Float64, Int32, Int64, UInt32, UInt64
def _getattr_multi(obj: object, op: str) -> Any:
""" "
Allows `op` to be multiple layers deep, i.e. op="str.lengths" will mean we first
get the attribute "str", and then the attribute "lengths"
"""
op_list = op.split(".")
return reduce(lambda o, m: getattr(o, m), op_list, obj)
def verify_series_and_expr_api(
input: pl.Series, expected: pl.Series, op: str, *args: Any, **kwargs: Any
) -> None:
"""
Small helper function to test element-wise functions for both the series and expressions api.
Examples
--------
>>> s = pl.Series([1, 3, 2])
>>> expected = pl.Series([1, 2, 3])
>>> verify_series_and_expr_api(s, expected, "sort")
"""
expr = _getattr_multi(pl.col("*"), op)(*args, **kwargs)
result_expr: pl.Series = input.to_frame().select(expr)[:, 0] # type: ignore
result_series = _getattr_multi(input, op)(*args, **kwargs)
testing.assert_series_equal(result_expr, expected)
testing.assert_series_equal(result_series, expected)
def test_cum_agg() -> None:
s = pl.Series("a", [1, 2, 3, 2])
verify_series_and_expr_api(s, pl.Series("a", [1, 3, 6, 8]), "cumsum")
verify_series_and_expr_api(s, pl.Series("a", [1, 1, 1, 1]), "cummin")
verify_series_and_expr_api(s, pl.Series("a", [1, 2, 3, 3]), "cummax")
verify_series_and_expr_api(s, pl.Series("a", [1, 2, 6, 12]), "cumprod")
def test_init_inputs(monkeypatch: Any) -> None:
for flag in [False, True]:
monkeypatch.setattr(pl.internals.construction, "_PYARROW_AVAILABLE", flag)
# Good inputs
pl.Series("a", [1, 2])
pl.Series("a", values=[1, 2])
pl.Series(name="a", values=[1, 2])
pl.Series(values=[1, 2], name="a")
assert pl.Series([1, 2]).dtype == pl.Int64
assert pl.Series(values=[1, 2]).dtype == pl.Int64
assert pl.Series("a").dtype == pl.Float32 # f32 type used in case of no data
assert pl.Series().dtype == pl.Float32
assert pl.Series(values=[True, False]).dtype == pl.Boolean
assert pl.Series(values=np.array([True, False])).dtype == pl.Boolean
assert pl.Series(values=np.array(["foo", "bar"])).dtype == pl.Utf8
assert pl.Series(values=["foo", "bar"]).dtype == pl.Utf8
assert (
pl.Series("a", [pl.Series([1, 2, 4]), pl.Series([3, 2, 1])]).dtype
== pl.List
)
assert pl.Series("a", [10000, 20000, 30000], dtype=pl.Time).dtype == pl.Time
# 2d numpy array
res = pl.Series(name="a", values=np.array([[1, 2], [3, 4]]))
assert all(res[0] == np.array([1, 2]))
assert all(res[1] == np.array([3, 4]))
assert (
pl.Series(values=np.array([["foo", "bar"], ["foo2", "bar2"]])).dtype
== pl.Object
)
# lists
assert pl.Series("a", [[1, 2], [3, 4]]).dtype == pl.List
# pandas
assert pl.Series(pd.Series([1, 2])).dtype == pl.Int64
# Bad inputs
with pytest.raises(ValueError):
pl.Series([1, 2, 3], [1, 2, 3])
with pytest.raises(ValueError):
pl.Series({"a": [1, 2, 3]})
with pytest.raises(OverflowError):
pl.Series("bigint", [2**64])
def test_concat() -> None:
s = pl.Series("a", [2, 1, 3])
assert pl.concat([s, s]).len() == 6
# check if s remains unchanged
assert s.len() == 3
def test_to_frame() -> None:
s = pl.Series([1, 2])
assert s.to_frame().shape == (2, 1)
def test_bitwise_ops() -> None:
a = pl.Series([True, False, True])
b = pl.Series([False, True, True])
assert (a & b).series_equal(pl.Series([False, False, True]))
assert (a | b).series_equal(pl.Series([True, True, True]))
assert (a ^ b).series_equal(pl.Series([True, True, False]))
assert (~a).series_equal(pl.Series([False, True, False]))
# rand/rxor/ror we trigger by casting the left hand to a list here in the test
# Note that the type annotations only allow Series to be passed in, but there is
# specific code to deal with non-Series inputs.
assert (True & a).series_equal(pl.Series([True, False, True])) # type: ignore
assert (True | a).series_equal(pl.Series([True, True, True])) # type: ignore
assert (True ^ a).series_equal(pl.Series([False, True, False])) # type: ignore
def test_bitwise_floats_invert() -> None:
a = pl.Series([2.0, 3.0, 0.0])
assert ~a == NotImplemented
def test_equality() -> None:
a = pl.Series("a", [1, 2])
b = a
cmp = a == b
assert isinstance(cmp, pl.Series)
assert cmp.sum() == 2
assert (a != b).sum() == 0
assert (a >= b).sum() == 2
assert (a <= b).sum() == 2
assert (a > b).sum() == 0
assert (a < b).sum() == 0
assert a.sum() == 3
assert a.series_equal(b)
a = pl.Series("name", ["ham", "foo", "bar"])
testing.assert_series_equal((a == "ham"), pl.Series("name", [True, False, False]))
def test_agg() -> None:
series = pl.Series("a", [1, 2])
assert series.mean() == 1.5
assert series.min() == 1
assert series.max() == 2
@pytest.mark.parametrize(
"s", [pl.Series([1, 2], dtype=Int64), pl.Series([1, 2], dtype=Float64)]
)
def test_arithmetic(s: pl.Series) -> None:
a = s
b = s
assert ((a * b) == [1, 4]).sum() == 2
assert ((a / b) == [1.0, 1.0]).sum() == 2
assert ((a + b) == [2, 4]).sum() == 2
assert ((a - b) == [0, 0]).sum() == 2
assert ((a + 1) == [2, 3]).sum() == 2
assert ((a - 1) == [0, 1]).sum() == 2
assert ((a / 1) == [1.0, 2.0]).sum() == 2
assert ((a // 2) == [0, 1]).sum() == 2
assert ((a * 2) == [2, 4]).sum() == 2
assert ((1 + a) == [2, 3]).sum() == 2
assert ((1 - a) == [0, -1]).sum() == 2
assert ((1 * a) == [1, 2]).sum() == 2
# integer division
testing.assert_series_equal(1 / a, pl.Series([1.0, 0.5]))
if s.dtype == Int64:
expected = pl.Series([1, 0])
else:
expected = pl.Series([1.0, 0.5])
testing.assert_series_equal(1 // a, expected)
# modulo
assert ((1 % a) == [0, 1]).sum() == 2
assert ((a % 1) == [0, 0]).sum() == 2
# negate
assert (-a == [-1, -2]).sum() == 2
# wrong dtypes in rhs operands
assert ((1.0 - a) == [0.0, -1.0]).sum() == 2
assert ((1.0 / a) == [1.0, 0.5]).sum() == 2
assert ((1.0 * a) == [1, 2]).sum() == 2
assert ((1.0 + a) == [2, 3]).sum() == 2
assert ((1.0 % a) == [0, 1]).sum() == 2
a = pl.Series("a", [datetime(2021, 1, 1)])
with pytest.raises(ValueError):
a // 2
with pytest.raises(ValueError):
a / 2
with pytest.raises(ValueError):
a * 2
with pytest.raises(ValueError):
a % 2
with pytest.raises(ValueError):
a**2
with pytest.raises(ValueError):
2 / a
with pytest.raises(ValueError):
2 // a
with pytest.raises(ValueError):
2 * a
with pytest.raises(ValueError):
2 % a
with pytest.raises(ValueError):
2**a
def test_add_string() -> None:
s = pl.Series(["hello", "weird"])
result = s + " world"
testing.assert_series_equal(result, pl.Series(["hello world", "weird world"]))
def test_append_extend() -> None:
a = pl.Series("a", [1, 2])
b = pl.Series("b", [8, 9, None])
a.append(b, append_chunks=False)
expected = pl.Series("a", [1, 2, 8, 9, None])
assert a.series_equal(expected, null_equal=True)
print(a.chunk_lengths())
assert a.n_chunks() == 1
def test_various() -> None:
a = pl.Series("a", [1, 2])
assert a.is_null().sum() == 0
assert a.name == "a"
a.rename("b", in_place=True)
assert a.name == "b"
assert a.len() == 2
assert len(a) == 2
b = a.slice(1, 1)
assert b.len() == 1
assert b.series_equal(pl.Series("b", [2]))
a.append(b)
assert a.series_equal(pl.Series("b", [1, 2, 2]))
a = pl.Series("a", range(20))
assert a.head(5).len() == 5
assert a.tail(5).len() == 5
assert a.head(5) != a.tail(5)
a = pl.Series("a", [2, 1, 4])
a.sort(in_place=True)
assert a.series_equal(pl.Series("a", [1, 2, 4]))
a = pl.Series("a", [2, 1, 1, 4, 4, 4])
testing.assert_series_equal(a.arg_unique(), pl.Series("a", [0, 1, 3], dtype=UInt32))
assert a.take([2, 3]).series_equal(pl.Series("a", [1, 4]))
assert a.is_numeric()
a = pl.Series("bool", [True, False])
assert not a.is_numeric()
def test_filter_ops() -> None:
a = pl.Series("a", range(20))
assert a[a > 1].len() == 18
assert a[a < 1].len() == 1
assert a[a <= 1].len() == 2
assert a[a >= 1].len() == 19
assert a[a == 1].len() == 1
assert a[a != 1].len() == 19
def test_cast() -> None:
a = pl.Series("a", range(20))
assert a.cast(pl.Float32).dtype == pl.Float32
assert a.cast(pl.Float64).dtype == pl.Float64
assert a.cast(pl.Int32).dtype == pl.Int32
assert a.cast(pl.UInt32).dtype == pl.UInt32
assert a.cast(pl.Datetime).dtype == pl.Datetime
assert a.cast(pl.Date).dtype == pl.Date
def test_to_python() -> None:
a = pl.Series("a", range(20))
b = a.to_list()
assert isinstance(b, list)
assert len(b) == 20
b = a.to_list(use_pyarrow=True)
assert isinstance(b, list)
assert len(b) == 20
a = pl.Series("a", [1, None, 2])
assert a.null_count() == 1
assert a.to_list() == [1, None, 2]
def test_sort() -> None:
a = pl.Series("a", [2, 1, 3])
testing.assert_series_equal(a.sort(), pl.Series("a", [1, 2, 3]))
testing.assert_series_equal(a.sort(reverse=True), pl.Series("a", [3, 2, 1]))
def test_rechunk() -> None:
a = pl.Series("a", [1, 2, 3])
b = pl.Series("b", [4, 5, 6])
a.append(b)
assert a.n_chunks() == 2
assert a.rechunk(in_place=False).n_chunks() == 1
a.rechunk(in_place=True)
assert a.n_chunks() == 1
def test_indexing() -> None:
a = pl.Series("a", [1, 2, None])
assert a[1] == 2
assert a[2] is None
b = pl.Series("b", [True, False])
assert b[0]
assert not b[1]
a = pl.Series("a", ["a", None])
assert a[0] == "a"
assert a[1] is None
a = pl.Series("a", [0.1, None])
assert a[0] == 0.1
assert a[1] is None
def test_arrow() -> None:
a = pl.Series("a", [1, 2, 3, None])
out = a.to_arrow()
assert out == pa.array([1, 2, 3, None])
a = pa.array(["foo", "bar"], pa.dictionary(pa.int32(), pa.utf8()))
s = pl.Series("a", a)
assert s.dtype == pl.Categorical
assert (
pl.from_arrow(pa.array([["foo"], ["foo", "bar"]], pa.list_(pa.utf8()))).dtype
== pl.List
)
def test_view() -> None:
a = pl.Series("a", [1.0, 2.0, 3.0])
assert isinstance(a.view(), np.ndarray)
    assert np.all(a.view() == np.array([1, 2, 3]))
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
from collections import deque
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import *
from models.decode import mot_decode
from models.model import create_model, load_model
from models.utils import _tranpose_and_gather_feat, _tranpose_and_gather_feat_expand
from tracker import matching
from tracking_utils.kalman_filter import KalmanFilter
from tracking_utils.log import logger
from tracking_utils.utils import *
from utils.post_process import ctdet_post_process
from cython_bbox import bbox_overlaps as bbox_ious
from .basetrack import BaseTrack, TrackState
from scipy.optimize import linear_sum_assignment
import random
import pickle
import copy
class GaussianBlurConv(nn.Module):
def __init__(self, channels=3):
super(GaussianBlurConv, self).__init__()
self.channels = channels
kernel = [[0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633],
[0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965],
[0.01330373, 0.11098164, 0.22508352, 0.11098164, 0.01330373],
[0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965],
[0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633]]
kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0)
kernel = np.repeat(kernel, self.channels, axis=0)
self.weight = nn.Parameter(data=kernel, requires_grad=False)
def __call__(self, x):
x = F.conv2d(x, self.weight, padding=2, groups=self.channels)
return x
gaussianBlurConv = GaussianBlurConv().cuda()
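# A CPU-side sketch of the blur module (tensor shapes are assumptions; the
# instance above lives on CUDA, so a fresh CPU instance is used here):
def _demo_gaussian_blur():
    blur = GaussianBlurConv(channels=3)
    x = torch.rand(1, 3, 64, 64)   # NCHW batch
    y = blur(x)                    # depthwise 5x5 Gaussian; padding=2 keeps HxW
    return y.shape                 # torch.Size([1, 3, 64, 64])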
seed = 0
random.seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Remove randomness (may be slower on Tesla GPUs)
# https://pytorch.org/docs/stable/notes/randomness.html
if seed == 0:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
smoothL1 = torch.nn.SmoothL1Loss()
mse = torch.nn.MSELoss()
td_ = {}
def bbox_dis(bbox1, bbox2):
center1 = (bbox1[:, :2] + bbox1[:, 2:]) / 2
center2 = (bbox2[:, :2] + bbox2[:, 2:]) / 2
center1 = np.repeat(center1.reshape(-1, 1, 2), len(bbox2), axis=1)
center2 = np.repeat(center2.reshape(1, -1, 2), len(bbox1), axis=0)
dis = np.sqrt(np.sum((center1 - center2) ** 2, axis=-1))
return dis
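# Sketch of bbox_dis on toy tlbr boxes (coordinates are made up): distances
# are between box centers, one row per box in bbox1.
def _demo_bbox_dis():
    b1 = np.array([[0., 0., 2., 2.]])           # center (1, 1)
    b2 = np.array([[4., 0., 6., 2.],            # center (5, 1)
                   [0., 4., 2., 6.]])           # center (1, 5)
    return bbox_dis(b1, b2)                     # -> [[4., 4.]]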
class STrack(BaseTrack):
shared_kalman = KalmanFilter()
shared_kalman_ = KalmanFilter()
def __init__(self, tlwh, score, temp_feat, buffer_size=30):
# wait activate
        self._tlwh = np.asarray(tlwh, dtype=float)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.score = score
self.tracklet_len = 0
self.exist_len = 1
self.smooth_feat = None
self.smooth_feat_ad = None
self.update_features(temp_feat)
self.features = deque([], maxlen=buffer_size)
self.alpha = 0.9
self.curr_tlbr = self.tlwh_to_tlbr(self._tlwh)
self.det_dict = {}
def get_v(self):
return self.mean[4:6] if self.mean is not None else None
def update_features_ad(self, feat):
feat /= np.linalg.norm(feat)
if self.smooth_feat_ad is None:
self.smooth_feat_ad = feat
else:
self.smooth_feat_ad = self.alpha * self.smooth_feat_ad + (1 - self.alpha) * feat
self.smooth_feat_ad /= np.linalg.norm(self.smooth_feat_ad)
def update_features(self, feat):
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def predict(self):
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
@staticmethod
def multi_predict(stracks):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
@staticmethod
def multi_predict_(stracks):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman_.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
def activate(self, kalman_filter, frame_id, track_id=None):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
if track_id:
self.track_id = track_id['track_id']
track_id['track_id'] += 1
else:
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def activate_(self, kalman_filter, frame_id, track_id=None):
"""Start a new tracklet"""
self.kalman_filter = kalman_filter
if track_id:
self.track_id = track_id['track_id']
track_id['track_id'] += 1
else:
self.track_id = self.next_id_()
self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
self.curr_tlbr = self.tlwh_to_tlbr(new_track.tlwh)
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
)
self.update_features(new_track.curr_feat)
self.tracklet_len = 0
self.exist_len += 1
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
def re_activate_(self, new_track, frame_id, new_id=False):
self.curr_tlbr = self.tlwh_to_tlbr(new_track.tlwh)
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
)
self.update_features(new_track.curr_feat)
self.tracklet_len = 0
self.exist_len += 1
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id_()
def update(self, new_track, frame_id, update_feature=True):
"""
Update a matched track
:type new_track: STrack
:type frame_id: int
:type update_feature: bool
:return:
"""
self.frame_id = frame_id
self.tracklet_len += 1
self.exist_len += 1
self.curr_tlbr = self.tlwh_to_tlbr(new_track.tlwh)
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
if update_feature:
self.update_features(new_track.curr_feat)
@property
# @jit(nopython=True)
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
# @jit(nopython=True)
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
def to_xyah(self):
return self.tlwh_to_xyah(self.tlwh)
@staticmethod
# @jit(nopython=True)
def tlbr_to_tlwh(tlbr):
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
# @jit(nopython=True)
def tlwh_to_tlbr(tlwh):
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
def __repr__(self):
return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
class JDETracker(object):
def __init__(
self,
opt,
frame_rate=30,
tracked_stracks=[],
lost_stracks=[],
removed_stracks=[],
frame_id=0,
ad_last_info={},
model=None
):
self.opt = opt
print('Creating model...')
if model:
self.model = model
else:
self.model = create_model(opt.arch, opt.heads, opt.head_conv)
self.model = load_model(self.model, opt.load_model).cuda()
self.model.eval()
self.log_index = []
self.unconfirmed_ad_iou = None
self.tracked_stracks_ad_iou = None
self.strack_pool_ad_iou = None
self.tracked_stracks = copy.deepcopy(tracked_stracks) # type: list[STrack]
self.lost_stracks = copy.deepcopy(lost_stracks) # type: list[STrack]
self.removed_stracks = copy.deepcopy(removed_stracks) # type: list[STrack]
self.tracked_stracks_ad = copy.deepcopy(tracked_stracks) # type: list[STrack]
self.lost_stracks_ad = copy.deepcopy(lost_stracks) # type: list[STrack]
self.removed_stracks_ad = copy.deepcopy(removed_stracks) # type: list[STrack]
self.tracked_stracks_ = copy.deepcopy(tracked_stracks) # type: list[STrack]
self.lost_stracks_ = copy.deepcopy(lost_stracks) # type: list[STrack]
self.removed_stracks_ = copy.deepcopy(removed_stracks) # type: list[STrack]
self.frame_id = frame_id
self.frame_id_ = frame_id
self.frame_id_ad = frame_id
self.det_thresh = opt.conf_thres
self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
self.max_time_lost = self.buffer_size
self.max_per_image = 128
self.kalman_filter = KalmanFilter()
self.kalman_filter_ad = KalmanFilter()
self.kalman_filter_ = KalmanFilter()
self.attacked_ids = set([])
self.low_iou_ids = set([])
self.ATTACK_IOU_THR = opt.iou_thr
self.attack_iou_thr = self.ATTACK_IOU_THR
self.ad_last_info = copy.deepcopy(ad_last_info)
self.FRAME_THR = 10
self.temp_i = 0
self.multiple_ori_ids = {}
self.multiple_att_ids = {}
self.multiple_ori2att = {}
self.multiple_att_freq = {}
# hijacking attack
self.ad_bbox = True
self.ad_ids = set([])
def post_process(self, dets, meta):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.opt.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
return dets[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.opt.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.opt.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.opt.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
@staticmethod
def recoverImg(im_blob, img0):
height = 608
width = 1088
im_blob = im_blob.cpu() * 255.0
shape = img0.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
im_blob = im_blob.squeeze().permute(1, 2, 0)[top:height - bottom, left:width - right, :].numpy().astype(
np.uint8)
im_blob = cv2.cvtColor(im_blob, cv2.COLOR_RGB2BGR)
h, w, _ = img0.shape
im_blob = cv2.resize(im_blob, (w, h))
return im_blob
def recoverNoise(self, noise, img0):
height = 608
width = 1088
shape = img0.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
noise = noise[:, :, top:height - bottom, left:width - right]
h, w, _ = img0.shape
# noise = self.resizeTensor(noise, h, w).cpu().squeeze().permute(1, 2, 0).numpy()
noise = noise.cpu().squeeze().permute(1, 2, 0).numpy()
        noise = (noise[:, :, ::-1] * 255).astype(int)
return noise
@staticmethod
def resizeTensor(tensor, height, width):
h = torch.linspace(-1, 1, height).view(-1, 1).repeat(1, width).to(tensor.device)
w = torch.linspace(-1, 1, width).repeat(height, 1).to(tensor.device)
grid = torch.cat((h.unsqueeze(2), w.unsqueeze(2)), dim=2)
grid = grid.unsqueeze(0)
output = F.grid_sample(tensor, grid=grid, mode='bilinear', align_corners=True)
return output
@staticmethod
def processIoUs(ious):
h, w = ious.shape
assert h == w
ious = np.tril(ious, -1)
index = np.argsort(-ious.reshape(-1))
indSet = set([])
for ind in index:
i = ind // h
j = ind % w
if ious[i, j] == 0:
break
if i in indSet or j in indSet:
ious[i, j] = 0
else:
indSet.add(i)
indSet.add(j)
return ious
def attack_sg_hj(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
ad_bbox,
track_v
):
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
H, W = outputs_ori['hm'].size()[2:]
hm_index = inds[0][remain_inds]
hm_index_att = hm_index[attack_ind].item()
index = list(range(hm_index.size(0)))
index.pop(attack_ind)
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
while True:
i += 1
loss = 0
hm_index_att_lst = [hm_index_att]
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
if ad_bbox:
assert track_v is not None
hm_index_gen = hm_index_att_lst[0]
hm_index_gen += -(np.sign(track_v[0]) + W * np.sign(track_v[1]))
loss -= ((1 - outputs['hm'].view(-1)[[hm_index_gen]].sigmoid()) ** 2).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, [hm_index_gen]].T,
wh_ori.view(2, -1)[:, hm_index_att_lst].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, [hm_index_gen]].T,
reg_ori.view(2, -1)[:, hm_index_att_lst].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad * 2
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, _ = self.forwardFeatureDet(
im_blob,
img0,
dets,
[attack_ind],
thr=1 if ad_bbox else 0,
vs=[track_v] if ad_bbox else []
)
if suc:
break
if i > 60:
break
return noise, i, suc
def attack_sg_det(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind
):
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
H, W = outputs_ori['hm'].size()[2:]
hm_index = inds[0][remain_inds]
hm_index_att = hm_index[attack_ind].item()
index = list(range(hm_index.size(0)))
index.pop(attack_ind)
i = 0
while True:
i += 1
loss = 0
hm_index_att_lst = [hm_index_att]
# for n_i in range(3):
# for n_j in range(3):
# hm_index_att_ = hm_index_att + (n_i - 1) * W + (n_j - 1)
# hm_index_att_ = max(0, min(H * W - 1, hm_index_att_))
# hm_index_att_lst.append(hm_index_att_)
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
# loss += ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2 *
# torch.log(1 - outputs['hm'].view(-1)[hm_index_att_lst].sigmoid())).mean()
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad * 2
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, _ = self.forwardFeatureDet(
im_blob,
img0,
dets,
[attack_ind]
)
if suc:
break
if i > 60:
break
return noise, i, suc
def attack_mt_hj(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds,
ad_ids,
track_vs
):
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
hm_index = inds[0][remain_inds]
hm_index_att_lst = hm_index[attack_inds].cpu().numpy().tolist()
best_i = None
best_noise = None
best_fail = np.inf
while True:
i += 1
loss = 0
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
hm_index_att_lst_ = [hm_index_att_lst[j] for j in range(len(hm_index_att_lst))
if attack_ids[j] not in ad_ids]
if len(hm_index_att_lst_):
assert len(track_vs) == len(hm_index_att_lst_)
hm_index_gen_lst = []
for index in range(len(hm_index_att_lst_)):
track_v = track_vs[index]
hm_index_gen = hm_index_att_lst_[index]
hm_index_gen += -(np.sign(track_v[0]) + W * np.sign(track_v[1]))
hm_index_gen_lst.append(hm_index_gen)
loss -= ((1 - outputs['hm'].view(-1)[hm_index_gen_lst].sigmoid()) ** 2).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, hm_index_gen_lst].T,
wh_ori.view(2, -1)[:, hm_index_att_lst_].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, hm_index_gen_lst].T,
reg_ori.view(2, -1)[:, hm_index_att_lst_].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
thrs = [0 for j in range(len(attack_inds))]
for j in range(len(thrs)):
if attack_ids[j] not in ad_ids:
thrs[j] = 0.9
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, fail_ids = self.forwardFeatureDet(
im_blob,
img0,
dets,
attack_inds.tolist(),
thr=thrs
)
if fail_ids is not None:
if fail_ids == 0:
break
elif fail_ids <= best_fail:
best_fail = fail_ids
best_i = i
best_noise = noise.clone()
if i > 60:
if self.opt.no_f_noise:
return None, i, False
else:
if best_i is not None:
noise = best_noise
i = best_i
return noise, i, False
return noise, i, True
def attack_mt_det(
self,
im_blob,
img0,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds
):
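        """Multi-target detection attack: jointly suppress the heatmap
        responses of all attacked detections; if full success is not reached
        within 60 iterations, falls back to the best-performing noise
        (fewest surviving ids)."""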
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
hm_index = inds[0][remain_inds]
hm_index_att_lst = hm_index[attack_inds].cpu().numpy().tolist()
best_i = None
best_noise = None
best_fail = np.inf
while True:
i += 1
loss = 0
loss -= ((outputs['hm'].view(-1)[hm_index_att_lst].sigmoid()) ** 2).mean()
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
outputs, suc, fail_ids = self.forwardFeatureDet(
im_blob,
img0,
dets,
attack_inds.tolist()
)
if fail_ids is not None:
if fail_ids == 0:
break
elif fail_ids <= best_fail:
best_fail = fail_ids
best_i = i
best_noise = noise.clone()
if i > 60:
if self.opt.no_f_noise:
return None, i, False
else:
if best_i is not None:
noise = best_noise
i = best_i
return noise, i, False
return noise, i, True
def attack_sg_feat(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
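        """Single-target ID-feature attack: perturb the image to raise the
        similarity between the attacked and target detections' re-ID
        embeddings (falling back to last-frame adversarial features when
        available), encouraging an identity switch without touching the
        detection heads."""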
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
last_ad_id_features = [None for _ in range(len(id_features[0]))]
for i in range(len(id_features)):
id_features[i] = id_features[i][[attack_ind, target_ind]]
i = 0
suc = True
while True:
i += 1
loss = 0
loss_feat = 0
for id_i, id_feature in enumerate(id_features):
if last_ad_id_features[attack_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[target_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
loss_feat += torch.mm(id_feature[0:0 + 1], id_feature[1:1 + 1].T).squeeze()
loss += loss_feat / len(id_features)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info
)
if id_features_ is not None:
id_features = id_features_
if ae_attack_id != attack_id and ae_attack_id is not None:
break
if i > 60:
suc = False
break
return noise, i, suc
def attack_sg_cl(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
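        """Single-target attack, center-location variant: the re-ID feature
        loss is disabled (commented out below); instead, at fixed iterations
        the attack/target heatmap centers are stepped toward each other's
        previous positions, and focal-style heatmap terms plus smooth-L1 on
        wh/reg push the detector to swap the two identities' boxes."""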
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
last_ad_id_features = [None for _ in range(len(id_features[0]))]
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
last_attack_det = None
last_target_det = None
STrack.multi_predict(strack_pool)
for strack in strack_pool:
if strack.track_id == attack_id:
last_ad_id_features[attack_ind] = strack.smooth_feat
last_attack_det = torch.from_numpy(strack.tlbr).cuda().float()
last_attack_det[[0, 2]] = (last_attack_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_attack_det[[1, 3]] = (last_attack_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
elif strack.track_id == target_id:
last_ad_id_features[target_ind] = strack.smooth_feat
last_target_det = torch.from_numpy(strack.tlbr).cuda().float()
last_target_det[[0, 2]] = (last_target_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_target_det[[1, 3]] = (last_target_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
last_attack_det_center = torch.round(
(last_attack_det[:2] + last_attack_det[2:]) / 2) if last_attack_det is not None else None
last_target_det_center = torch.round(
(last_target_det[:2] + last_target_det[2:]) / 2) if last_target_det is not None else None
hm_index = inds[0][remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][[attack_ind, target_ind]]
i = 0
j = -1
suc = True
ori_hm_index = hm_index[[attack_ind, target_ind]].clone()
ori_hm_index_re = hm_index[[target_ind, attack_ind]].clone()
att_hm_index = None
noise_0 = None
i_0 = None
noise_1 = None
i_1 = None
while True:
i += 1
loss = 0
loss_feat = 0
# for id_i, id_feature in enumerate(id_features):
# if last_ad_id_features[attack_ind] is not None:
# last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
# sim_1 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
# sim_2 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
# loss_feat += sim_2 - sim_1
# if last_ad_id_features[target_ind] is not None:
# last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
# sim_1 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
# sim_2 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
# loss_feat += sim_2 - sim_1
# if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
# loss_feat += torch.mm(id_feature[0:0 + 1], id_feature[1:1 + 1].T).squeeze()
# loss += loss_feat / len(id_features)
if i in [1, 10, 20, 30, 35, 40, 45, 50, 55]:
attack_det_center = torch.stack([hm_index[attack_ind] % W, hm_index[attack_ind] // W]).float()
target_det_center = torch.stack([hm_index[target_ind] % W, hm_index[target_ind] // W]).float()
if last_target_det_center is not None:
attack_center_delta = attack_det_center - last_target_det_center
if torch.max(torch.abs(attack_center_delta)) > 1:
attack_center_delta /= torch.max(torch.abs(attack_center_delta))
attack_det_center = torch.round(attack_det_center - attack_center_delta).int()
hm_index[attack_ind] = attack_det_center[0] + attack_det_center[1] * W
if last_attack_det_center is not None:
target_center_delta = target_det_center - last_attack_det_center
if torch.max(torch.abs(target_center_delta)) > 1:
target_center_delta /= torch.max(torch.abs(target_center_delta))
target_det_center = torch.round(target_det_center - target_center_delta).int()
hm_index[target_ind] = target_det_center[0] + target_det_center[1] * W
att_hm_index = hm_index[[attack_ind, target_ind]].clone()
if att_hm_index is not None:
n_att_hm_index = []
n_ori_hm_index_re = []
for hm_ind in range(len(att_hm_index)):
for n_i in range(3):
for n_j in range(3):
att_hm_ind = att_hm_index[hm_ind].item()
att_hm_ind = att_hm_ind + (n_i - 1) * W + (n_j - 1)
att_hm_ind = max(0, min(H*W-1, att_hm_ind))
n_att_hm_index.append(att_hm_ind)
ori_hm_ind = ori_hm_index_re[hm_ind].item()
ori_hm_ind = ori_hm_ind + (n_i - 1) * W + (n_j - 1)
ori_hm_ind = max(0, min(H * W - 1, ori_hm_ind))
n_ori_hm_index_re.append(ori_hm_ind)
# print(n_att_hm_index, n_ori_hm_index_re)
loss += ((1 - outputs['hm'].view(-1).sigmoid()[n_att_hm_index]) ** 2 *
torch.log(outputs['hm'].view(-1).sigmoid()[n_att_hm_index])).mean()
loss += ((outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re]) ** 2 *
torch.log(1 - outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re])).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, n_att_hm_index].T, wh_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, n_att_hm_index].T, reg_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info
)
if id_features_ is not None:
id_features = id_features_
if outputs_ is not None:
outputs = outputs_
# if hm_index_ is not None:
# hm_index = hm_index_
if ae_attack_id != attack_id and ae_attack_id is not None:
break
if i > 60:
if noise_0 is not None:
return noise_0, i_0, suc
elif noise_1 is not None:
return noise_1, i_1, suc
if self.opt.no_f_noise:
return None, i, False
else:
suc = False
break
return noise, i, suc
def attack_sg_random(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
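        """Random-noise baseline for the single-target attack: draw one
        uniform noise image, rescale it to a random L2 norm in [2, 8], apply
        it once, and report whether the identity switch happened."""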
im_blob_ori = im_blob.clone().data
suc = False
noise = torch.rand(im_blob_ori.size()).to(im_blob_ori.device)
noise /= (noise**2).sum().sqrt()
noise *= random.uniform(2, 8)
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info,
grad=False
)
if ae_attack_id != attack_id and ae_attack_id is not None:
suc = True
return noise, 1, suc
def attack_mt_random(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds,
target_ids,
target_inds
):
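        """Random-noise baseline for the multi-target attack: a single
        randomly scaled noise image is applied once; success is declared only
        if every attacked id fails (fail_ids == 0)."""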
im_blob_ori = im_blob.clone().data
suc = False
noise = torch.rand(im_blob_ori.size()).to(im_blob_ori.device)
noise /= (noise ** 2).sum().sqrt()
noise *= random.uniform(2, 8)
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features, outputs, fail_ids = self.forwardFeatureMt(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_ids,
attack_inds,
target_ids,
target_inds,
last_info,
grad=False
)
if fail_ids == 0:
suc = True
return noise, 1, suc
def attack_sg(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_id,
attack_ind,
target_id,
target_ind
):
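        """Full single-target ID-switch attack: combines the re-ID
        feature-similarity loss with the periodic center-swap heatmap/wh/reg
        losses, using the previous frame's predicted tracks (strack_pool) as
        references for both features and box centers."""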
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
last_ad_id_features = [None for _ in range(len(id_features[0]))]
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
last_attack_det = None
last_target_det = None
STrack.multi_predict(strack_pool)
for strack in strack_pool:
if strack.track_id == attack_id:
last_ad_id_features[attack_ind] = strack.smooth_feat
last_attack_det = torch.from_numpy(strack.tlbr).cuda().float()
last_attack_det[[0, 2]] = (last_attack_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_attack_det[[1, 3]] = (last_attack_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
elif strack.track_id == target_id:
last_ad_id_features[target_ind] = strack.smooth_feat
last_target_det = torch.from_numpy(strack.tlbr).cuda().float()
last_target_det[[0, 2]] = (last_target_det[[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_target_det[[1, 3]] = (last_target_det[[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
last_attack_det_center = torch.round(
(last_attack_det[:2] + last_attack_det[2:]) / 2) if last_attack_det is not None else None
last_target_det_center = torch.round(
(last_target_det[:2] + last_target_det[2:]) / 2) if last_target_det is not None else None
hm_index = inds[0][remain_inds]
for i in range(len(id_features)):
id_features[i] = id_features[i][[attack_ind, target_ind]]
i = 0
j = -1
suc = True
ori_hm_index = hm_index[[attack_ind, target_ind]].clone()
ori_hm_index_re = hm_index[[target_ind, attack_ind]].clone()
att_hm_index = None
noise_0 = None
i_0 = None
noise_1 = None
i_1 = None
while True:
i += 1
loss = 0
loss_feat = 0
for id_i, id_feature in enumerate(id_features):
if last_ad_id_features[attack_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[target_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[1:1 + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[0:0 + 1], last_ad_id_feature.T).squeeze()
loss_feat += sim_2 - sim_1
if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
loss_feat += torch.mm(id_feature[0:0 + 1], id_feature[1:1 + 1].T).squeeze()
loss += loss_feat / len(id_features)
if i in [10, 20, 30, 35, 40, 45, 50, 55]:
attack_det_center = torch.stack([hm_index[attack_ind] % W, hm_index[attack_ind] // W]).float()
target_det_center = torch.stack([hm_index[target_ind] % W, hm_index[target_ind] // W]).float()
if last_target_det_center is not None:
attack_center_delta = attack_det_center - last_target_det_center
if torch.max(torch.abs(attack_center_delta)) > 1:
attack_center_delta /= torch.max(torch.abs(attack_center_delta))
attack_det_center = torch.round(attack_det_center - attack_center_delta).int()
hm_index[attack_ind] = attack_det_center[0] + attack_det_center[1] * W
if last_attack_det_center is not None:
target_center_delta = target_det_center - last_attack_det_center
if torch.max(torch.abs(target_center_delta)) > 1:
target_center_delta /= torch.max(torch.abs(target_center_delta))
target_det_center = torch.round(target_det_center - target_center_delta).int()
hm_index[target_ind] = target_det_center[0] + target_det_center[1] * W
att_hm_index = hm_index[[attack_ind, target_ind]].clone()
if att_hm_index is not None:
n_att_hm_index = []
n_ori_hm_index_re = []
for hm_ind in range(len(att_hm_index)):
for n_i in range(3):
for n_j in range(3):
att_hm_ind = att_hm_index[hm_ind].item()
att_hm_ind = att_hm_ind + (n_i - 1) * W + (n_j - 1)
att_hm_ind = max(0, min(H*W-1, att_hm_ind))
n_att_hm_index.append(att_hm_ind)
ori_hm_ind = ori_hm_index_re[hm_ind].item()
ori_hm_ind = ori_hm_ind + (n_i - 1) * W + (n_j - 1)
ori_hm_ind = max(0, min(H * W - 1, ori_hm_ind))
n_ori_hm_index_re.append(ori_hm_ind)
# print(n_att_hm_index, n_ori_hm_index_re)
loss += ((1 - outputs['hm'].view(-1).sigmoid()[n_att_hm_index]) ** 2 *
torch.log(outputs['hm'].view(-1).sigmoid()[n_att_hm_index])).mean()
loss += ((outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re]) ** 2 *
torch.log(1 - outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re])).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, n_att_hm_index].T, wh_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, n_att_hm_index].T, reg_ori.view(2, -1)[:, n_ori_hm_index_re].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features_, outputs_, ae_attack_id, ae_target_id, hm_index_ = self.forwardFeatureSg(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_id,
attack_ind,
target_id,
target_ind,
last_info
)
if id_features_ is not None:
id_features = id_features_
if outputs_ is not None:
outputs = outputs_
# if hm_index_ is not None:
# hm_index = hm_index_
if ae_attack_id != attack_id and ae_attack_id is not None:
break
if i > 60:
if noise_0 is not None:
return noise_0, i_0, suc
elif noise_1 is not None:
return noise_1, i_1, suc
if self.opt.no_f_noise:
return None, i, False
else:
suc = False
break
return noise, i, suc
def attack_mt(
self,
im_blob,
img0,
id_features,
dets,
inds,
remain_inds,
last_info,
outputs_ori,
attack_ids,
attack_inds,
target_ids,
target_inds
):
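        """Multi-target version of attack_sg: per attacked/target pair it
        accumulates the feature-swap loss (optionally clamped via
        opt.hard_sample) and the center-swap heatmap/wh/reg losses over a 3x3
        neighborhood of each peak, keeping the best noise (fewest failed ids)
        if full success is not reached within 60 iterations."""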
img0_h, img0_w = img0.shape[:2]
H, W = outputs_ori['hm'].size()[2:]
r_w, r_h = img0_w / W, img0_h / H
r_max = max(r_w, r_h)
noise = torch.zeros_like(im_blob)
im_blob_ori = im_blob.clone().data
outputs = outputs_ori
wh_ori = outputs['wh'].clone().data
reg_ori = outputs['reg'].clone().data
i = 0
j = -1
last_ad_id_features = [None for _ in range(len(id_features[0]))]
strack_pool = copy.deepcopy(last_info['last_strack_pool'])
ad_attack_ids = [self.multiple_ori2att[attack_id] for attack_id in attack_ids]
ad_target_ids = [self.multiple_ori2att[target_id] for target_id in target_ids]
last_attack_dets = [None] * len(ad_attack_ids)
last_target_dets = [None] * len(ad_target_ids)
STrack.multi_predict(strack_pool)
for strack in strack_pool:
if strack.track_id in ad_attack_ids:
index = ad_attack_ids.index(strack.track_id)
last_ad_id_features[attack_inds[index]] = strack.smooth_feat
last_attack_dets[index] = torch.from_numpy(strack.tlbr).cuda().float()
last_attack_dets[index][[0, 2]] = (last_attack_dets[index][[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_attack_dets[index][[1, 3]] = (last_attack_dets[index][[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
if strack.track_id in ad_target_ids:
index = ad_target_ids.index(strack.track_id)
last_ad_id_features[target_inds[index]] = strack.smooth_feat
last_target_dets[index] = torch.from_numpy(strack.tlbr).cuda().float()
last_target_dets[index][[0, 2]] = (last_target_dets[index][[0, 2]] - 0.5 * W * (r_w - r_max)) / r_max
last_target_dets[index][[1, 3]] = (last_target_dets[index][[1, 3]] - 0.5 * H * (r_h - r_max)) / r_max
last_attack_dets_center = []
for det in last_attack_dets:
if det is None:
last_attack_dets_center.append(None)
else:
last_attack_dets_center.append((det[:2] + det[2:]) / 2)
last_target_dets_center = []
for det in last_target_dets:
if det is None:
last_target_dets_center.append(None)
else:
last_target_dets_center.append((det[:2] + det[2:]) / 2)
hm_index = inds[0][remain_inds]
ori_hm_index_re_lst = []
for ind in range(len(attack_ids)):
attack_ind = attack_inds[ind]
target_ind = target_inds[ind]
ori_hm_index_re_lst.append(hm_index[[target_ind, attack_ind]].clone())
att_hm_index_lst = []
best_i = None
best_noise = None
best_fail = np.inf
while True:
i += 1
loss = 0
loss_feat = 0
for index, attack_id in enumerate(attack_ids):
target_id = target_ids[index]
attack_ind = attack_inds[index]
target_ind = target_inds[index]
for id_i, id_feature in enumerate(id_features):
if last_ad_id_features[attack_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[attack_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[attack_ind:attack_ind + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[target_ind:target_ind + 1], last_ad_id_feature.T).squeeze()
if self.opt.hard_sample > 0:
loss_feat += torch.clamp(sim_2 - sim_1, max=self.opt.hard_sample)
else:
loss_feat += sim_2 - sim_1
if last_ad_id_features[target_ind] is not None:
last_ad_id_feature = torch.from_numpy(last_ad_id_features[target_ind]).unsqueeze(0).cuda()
sim_1 = torch.mm(id_feature[target_ind:target_ind + 1], last_ad_id_feature.T).squeeze()
sim_2 = torch.mm(id_feature[attack_ind:attack_ind + 1], last_ad_id_feature.T).squeeze()
if self.opt.hard_sample > 0:
loss_feat += torch.clamp(sim_2 - sim_1, max=self.opt.hard_sample)
else:
loss_feat += sim_2 - sim_1
if last_ad_id_features[attack_ind] is None and last_ad_id_features[target_ind] is None:
loss_feat += torch.mm(id_feature[attack_ind:attack_ind + 1],
id_feature[target_ind:target_ind + 1].T).squeeze()
if i in [10, 20, 30, 35, 40, 45, 50, 55]:
attack_det_center = torch.stack([hm_index[attack_ind] % W, hm_index[attack_ind] // W]).float()
target_det_center = torch.stack([hm_index[target_ind] % W, hm_index[target_ind] // W]).float()
if last_target_dets_center[index] is not None:
attack_center_delta = attack_det_center - last_target_dets_center[index]
if torch.max(torch.abs(attack_center_delta)) > 1:
attack_center_delta /= torch.max(torch.abs(attack_center_delta))
attack_det_center = torch.round(attack_det_center - attack_center_delta).int()
hm_index[attack_ind] = attack_det_center[0] + attack_det_center[1] * W
if last_attack_dets_center[index] is not None:
target_center_delta = target_det_center - last_attack_dets_center[index]
if torch.max(torch.abs(target_center_delta)) > 1:
target_center_delta /= torch.max(torch.abs(target_center_delta))
target_det_center = torch.round(target_det_center - target_center_delta).int()
hm_index[target_ind] = target_det_center[0] + target_det_center[1] * W
if index == 0:
att_hm_index_lst = []
att_hm_index_lst.append(hm_index[[attack_ind, target_ind]].clone())
loss += loss_feat / len(id_features)
if len(att_hm_index_lst):
assert len(att_hm_index_lst) == len(ori_hm_index_re_lst)
n_att_hm_index_lst = []
n_ori_hm_index_re_lst = []
for lst_ind in range(len(att_hm_index_lst)):
for hm_ind in range(len(att_hm_index_lst[lst_ind])):
for n_i in range(3):
for n_j in range(3):
att_hm_ind = att_hm_index_lst[lst_ind][hm_ind].item()
att_hm_ind = att_hm_ind + (n_i - 1) * W + (n_j - 1)
att_hm_ind = max(0, min(H*W-1, att_hm_ind))
n_att_hm_index_lst.append(att_hm_ind)
ori_hm_ind = ori_hm_index_re_lst[lst_ind][hm_ind].item()
ori_hm_ind = ori_hm_ind + (n_i - 1) * W + (n_j - 1)
ori_hm_ind = max(0, min(H * W - 1, ori_hm_ind))
n_ori_hm_index_re_lst.append(ori_hm_ind)
# print(n_att_hm_index, n_ori_hm_index_re)
loss += ((1 - outputs['hm'].view(-1).sigmoid()[n_att_hm_index_lst]) ** 2 *
torch.log(outputs['hm'].view(-1).sigmoid()[n_att_hm_index_lst])).mean()
loss += ((outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re_lst]) ** 2 *
torch.log(1 - outputs['hm'].view(-1).sigmoid()[n_ori_hm_index_re_lst])).mean()
loss -= smoothL1(outputs['wh'].view(2, -1)[:, n_att_hm_index_lst].T, wh_ori.view(2, -1)[:, n_ori_hm_index_re_lst].T)
loss -= smoothL1(outputs['reg'].view(2, -1)[:, n_att_hm_index_lst].T, reg_ori.view(2, -1)[:, n_ori_hm_index_re_lst].T)
loss.backward()
grad = im_blob.grad
grad /= (grad ** 2).sum().sqrt() + 1e-8
noise += grad
im_blob = torch.clip(im_blob_ori + noise, min=0, max=1).data
id_features, outputs, fail_ids = self.forwardFeatureMt(
im_blob,
img0,
dets,
inds,
remain_inds,
attack_ids,
attack_inds,
target_ids,
target_inds,
last_info
)
if fail_ids is not None:
if fail_ids == 0:
break
elif fail_ids <= best_fail:
best_fail = fail_ids
best_i = i
best_noise = noise.clone()
if i > 60:
if self.opt.no_f_noise:
return None, i, False
else:
if best_i is not None:
noise = best_noise
i = best_i
return noise, i, False
return noise, i, True
def forwardFeatureDet(self, im_blob, img0, dets_, attack_inds, thr=0, vs=[]):
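        """Re-run the detector on the perturbed image and decode boxes so the
        caller can check, via IoU matching against the original dets_, whether
        each attacked detection has been suppressed; thr and vs appear to
        parameterize the success check when a velocity-shifted fake box is
        expected."""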
width = img0.shape[1]
height = img0.shape[0]
inp_height = im_blob.shape[2]
inp_width = im_blob.shape[3]
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
meta = {'c': c, 's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio}
im_blob.requires_grad = True
self.model.zero_grad()
output = self.model(im_blob)[-1]
hm = output['hm'].sigmoid()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
dets_raw, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = self.post_process(dets_raw.clone(), meta)
dets = self.merge_outputs([dets])[1]
remain_inds = dets[:, 4] > self.opt.conf_thres
dets = dets[remain_inds]
        ious = bbox_ious(np.ascontiguousarray(dets_[:, :4], dtype=np.float),
                         np.ascontiguousarray(dets[:, :4], dtype=np.float))
import argparse
import os
import sys
import numpy as np
import pdb
from tqdm import tqdm
import cv2
import glob
from numpy import *
import matplotlib
#matplotlib.use("Agg")
#matplotlib.use("wx")
#matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import scipy
from scipy.special import softmax
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch.nn as nn
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.deeplab import *
from PIL import Image
# class load_data(Dataset):
# def __init__(self,args,img_path):
# super().__init__()
# self.args = args
# self.img_path = img_path
# def __getitem__(self,img_path):
# image = Image.open(self.img_path).convert('RGB')
# image = np.array(image).astype(np.float32).transpose((2, 0, 1))
# image = torch.from_numpy(image).float()
# return image
def get_model(nclass,args):
model = DeepLab(num_classes=nclass,
backbone=args.backbone,
output_stride=args.out_stride,
sync_bn=args.sync_bn,
freeze_bn=args.freeze_bn)
# Using cuda
if args.cuda:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
patch_replication_callback(model)
model = model.cuda()
checkpoint = torch.load(args.resume)
if args.cuda:
model.module.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
return model
def get_pred(img_path,model,args):
model.eval()
image = Image.open(img_path).convert('RGB')
#image = image.resize((512,512), Image.ANTIALIAS)
image = np.array(image).astype(np.float32).transpose((2, 0, 1))
image = np.expand_dims(image, axis=0)
image = torch.from_numpy(image).float()
if args.cuda:
image = image.cuda()
with torch.no_grad():
output = model(image)
#pdb.set_trace()
# normalize = nn.Softmax(dim=1)
# output = normalize(output)
pred = output.data.cpu().numpy()
return pred
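# Minimal usage sketch (hypothetical `args`; assumes the fields used above --
# backbone, out_stride, sync_bn, freeze_bn, cuda, gpu_ids, resume -- are set):
#   model = get_model(nclass=2, args=args)
#   pred = get_pred('frame.png', model, args)       # (1, nclass, H, W) scores
#   mask = np.argmax(pred, axis=1).squeeze()        # per-pixel class labels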
def F1_loss(pred,target):
    N = np.logical_or(pred, target)    # union of predicted and target positives
    Tp = np.logical_and(pred, target)  # true positives
    Fn = np.subtract(target, Tp)       # false negatives (element-wise subtraction)
    #Fn = np.bitwise_xor(target,Tp)
    Fp = np.subtract(pred, Tp)         # false positives
import numpy as np
import pandas as pd
rng = np.random.RandomState(1)
import numpy as np
from mayavi import mlab
def sectional2nodal(x):
return np.r_[x[0], np.convolve(x, [0.5, 0.5], "valid"), x[-1]]
def nodal2sectional(x):
return 0.5 * (x[:-1] + x[1:])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
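# Usage sketch (assumes a matplotlib 3-D axis; hypothetical data):
#   import matplotlib.pyplot as plt
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection="3d")
#   ax.plot([0, 1], [0, 2], [0, 4])
#   set_axes_equal(ax)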
class Visualize(object):
def __init__(self, prob):
prob.run_model()
self.prob = prob
self.fig = None
def draw_spar(self, fname="spar.png"):
self.init_figure()
self.draw_ocean()
self.draw_mooring(self.prob["mooring_plot_matrix"])
zcut = 1.0 + self.prob["main_freeboard"]
self.draw_pontoons(self.prob["plot_matrix"], 0.5 * self.prob["fairlead_support_outer_diameter"], zcut)
self.draw_column(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"],
self.prob["main.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["main.wall_thickness"])
self.draw_ballast(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"] - t_full,
self.prob["main.permanent_ballast_height"],
self.prob["variable_ballast_height"],
)
self.draw_column(
[0.0, 0.0],
self.prob["hub_height"],
self.prob["tow.tower_section_height"],
0.5 * self.prob["tow.tower_outer_diameter"],
None,
(0.9,) * 3,
)
if self.prob["main.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
self.prob["main.buoyancy_tank_location"],
0.5 * self.prob["main.buoyancy_tank_diameter"],
self.prob["main.buoyancy_tank_height"],
)
self.set_figure(fname)
def draw_semi(self, fname="semi.png"):
self.init_figure()
self.draw_ocean()
self.draw_mooring(self.prob["mooring_plot_matrix"])
pontoonMat = self.prob["plot_matrix"]
zcut = 1.0 + np.maximum(self.prob["main_freeboard"], self.prob["offset_freeboard"])
self.draw_pontoons(pontoonMat, 0.5 * self.prob["pontoon_outer_diameter"], zcut)
self.draw_column(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"],
self.prob["main.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["main.wall_thickness"])
self.draw_ballast(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"] - t_full,
self.prob["main.permanent_ballast_height"],
self.prob["variable_ballast_height"],
)
if self.prob["main.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
self.prob["main.buoyancy_tank_location"],
0.5 * self.prob["main.buoyancy_tank_diameter"],
self.prob["main.buoyancy_tank_height"],
)
R_semi = self.prob["radius_to_offset_column"]
ncolumn = int(self.prob["number_of_offset_columns"])
angles = np.linspace(0, 2 * np.pi, ncolumn + 1)
x = R_semi * np.cos(angles)
y = R_semi * np.sin(angles)
for k in range(ncolumn):
self.draw_column(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
0.5 * self.prob["off.outer_diameter"],
self.prob["off.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["off.wall_thickness"])
self.draw_ballast(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
0.5 * self.prob["off.outer_diameter"] - t_full,
self.prob["off.permanent_ballast_height"],
0.0,
)
if self.prob["off.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
self.prob["off.buoyancy_tank_location"],
0.5 * self.prob["off.buoyancy_tank_diameter"],
self.prob["off.buoyancy_tank_height"],
)
self.draw_column(
[0.0, 0.0],
self.prob["hub_height"],
self.prob["tow.tower_section_height"],
0.5 * self.prob["tow.tower_outer_diameter"],
None,
(0.9,) * 3,
)
self.set_figure(fname)
def init_figure(self):
mysky = np.array([135, 206, 250]) / 255.0
mysky = tuple(mysky.tolist())
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# fig = mlab.figure(bgcolor=(1,)*3, size=(1600,1100))
# fig = mlab.figure(bgcolor=mysky, size=(1600,1100))
self.fig = mlab.figure(bgcolor=(0,) * 3, size=(1600, 1100))
def draw_ocean(self):
if self.fig is None:
self.init_figure()
npts = 100
# mybrown = np.array([244, 170, 66]) / 255.0
# mybrown = tuple(mybrown.tolist())
mywater = np.array([95, 158, 160]) / 255.0 # (0.0, 0.0, 0.8) [143, 188, 143]
mywater = tuple(mywater.tolist())
alpha = 0.3
# Waterplane box
x = y = 100 * np.linspace(-1, 1, npts)
X, Y = np.meshgrid(x, y)
Z = np.sin(100 * X * Y) # np.zeros(X.shape)
# ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
mlab.mesh(X, Y, Z, opacity=alpha, color=mywater, figure=self.fig)
# Sea floor
Z = -self.prob["water_depth"] * np.ones(X.shape)
# ax.plot_surface(10*X, 10*Y, Z, alpha=1.0, color=mybrown)
# mlab.mesh(10*X,10*Y,Z, opacity=1.0, color=mybrown, figure=self.fig)
# Sides
# x = 500 * np.linspace(-1, 1, npts)
# z = self.prob['water_depth'] * np.linspace(-1, 0, npts)
# X,Z = np.meshgrid(x,z)
# Y = x.max()*np.ones(Z.shape)
##ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
# mlab.mesh(X,Y,Z, opacity=alpha, color=mywater, figure=self.fig)
# mlab.mesh(X,-Y,Z, opacity=alpha, color=mywater, figure=self.fig)
# mlab.mesh(Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)
##mlab.mesh(-Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)
def draw_mooring(self, mooring):
mybrown = np.array([244, 170, 66]) / 255.0
mybrown = tuple(mybrown.tolist())
npts = 100
# Sea floor
print(self.prob["anchor_radius"])
r = np.linspace(0, self.prob["anchor_radius"], npts)
        th = np.linspace(0, 2 * np.pi, npts)
'''
ABOUT:
This python program will run an SVM classifier on an EO-1 ALI scene,
and output a GeoTiff containing the classified scene.
DEPENDS:
gdal
numpy
sklearn
AUTHORS:
<NAME>
<NAME>
HISTORY:
April 2014: Original script (beta).
USE:
For use on the Open Science Data Cloud public data commons.
> python classify.py YEAR DAY IMAGE OUTFILE.PNG
For example, classify an image of the Italian coast from 29/1/2014
> python classify.py 2014 029 EO1A1930292014029110PZ italyClassified.tif
'''
__author__ = '<NAME>'
__version__ = 0.3
# Class: test
#
# Object to set up, run, and save results from an SVM classification
#
import gdal,osr
import scipy.misc as mpimg
import numpy as np
import time
import csv
from sklearn import svm
import numpy.ma as ma
from sklearn.externals import joblib
import os.path
from PIL import Image
import matplotlib.pyplot as plt
class test(object):
# initializer
#
# str: filePre, prefix for EO-1 Scene files
def __init__(self, filePre='', output='test.tif'):
self.fullTrainSet = np.array([])
self.fullTestSet = np.array([])
self.trainSet = np.array([])
self.trainLab = None
self.filePre = filePre
self.output = output
self.createMetadata()
self.bands = np.array([])
self.testSet = np.array([])
self.dims = None
self.mask = None
self.answers = None
self.tester = None
# addRatio
#
# Adds ratio of two bands to test / training set
#
# (int1, int2): ratio, int1:int2 ratio to be added
def addRatio(self,ratio):
numerInd = self.bands[np.where(self.bands==ratio[0]-1)[0]]
denomInd = self.bands[np.where(self.bands==ratio[1]-1)[0]]
numerArr = np.reshape(self.fullTestSet[:,numerInd],(self.fullTestSet.shape[0],1))
        denomArr = np.reshape(self.fullTestSet[:, denomInd], (self.fullTestSet.shape[0], 1))
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import numpy as np
from numpy import random
#
from functools import partial
from multiprocessing import Pool
#
from scipy.spatial.distance import pdist, cdist
from scipy.stats import kstwobign, pearsonr
from scipy.stats import genextreme, chi2, norm
from scipy.interpolate import interp1d
from numba import jit
# starvine imports
from starvine.bvcopula.pc_base import PairCopula
def gauss_copula_test(x1, y1, wgts=None, nboot=8000, dist='ks',
alpha=0.05, procs=4, resample=8):
"""!
    @brief Tests if a Gaussian copula is a good description of the
    dependence structure of a bivariate data set.
@param x1 ndarray, shape (n1, ) where n1 is number of samples
@param y1 ndarray, shape (n1, )
@param wgts ndarray, shape (n1, )
    @param dist str. in ('ks', 'ks-avg', 'ad', 'ad-avg'):
        'ks' for Kolmogorov-Smirnov, 'ad' for Anderson-Darling;
        the '-avg' variants use the mean rather than the max distance
@param procs int. number of processes to use. Default=4
    @param resample int. Bootstrap sample size. Only used if wgts are supplied.
@param alpha float. test significance level. Default=0.05
@return (p_val, d_0, h_dict)
p_val float. p-value of test
d_0 float. Distance metric
h_dict dict. {'h0': Bool} Result of hypothesis test
@note Also works with weighted samples by resampling original data
with replacement.
let h0 be the hypothesis that the gaussian copula fits the data
<NAME>. and <NAME>. Testing the Gaussian Copula
Hypothesis for Financial Asset Dependencies.
Quantitative Finance. Vol 3. pp. 231-250, 2001.
"""
    assert nboot >= 80  # require adequate sample size for hypothesis test
if wgts is not None:
# reasample weighted data with replacement
pc = PairCopula(x1, y1, weights=wgts, resample=resample)
else:
pc = PairCopula(x1, y1)
# standard normal transform
y_hat_1 = norm.ppf(pc.UU)
y_hat_2 = norm.ppf(pc.VV)
y_hat = np.array([y_hat_1, y_hat_2]).T
# compute cov matrix and pre-compute inverse
cov_hat = np.cov(y_hat.T, bias=True)
cov_hat_inv = np.linalg.inv(cov_hat)
assert cov_hat_inv.shape == (2, 2,)
# est orig distance metric
d_0 = dist_measure(y_hat, cov_hat_inv, dist)
print("KS-Gauss Dist= %f)" % d_0)
# estimate p-value by boostrap resampling
d = np.zeros(nboot)
if procs > 1:
pool = Pool(procs)
d = pool.map(partial(sample_d,
cov_hat=cov_hat,
cov_hat_inv=cov_hat_inv,
dist=dist,
N=len(x1)
),
list(range(nboot)))
d = np.array(d)
pool.close()
else:
for i in range(nboot):
d[i] = sample_d(i, cov_hat, cov_hat_inv, dist, len(x1))
print("KS-Gauss Empirical Dist Range= (%f, %f))" % (np.min(d), np.max(d)))
# compute p-val
# p_val = 1 - d_cdf(d_0)
p_val = (d >= d_0).sum() / len(d)
h_dict = {'h0': p_val > alpha}
return p_val, d_0, h_dict
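# Usage sketch (hypothetical data; single process avoids Pool overhead):
#   x = np.random.randn(500)
#   y = 0.6 * x + 0.8 * np.random.randn(500)
#   p_val, d_0, h = gauss_copula_test(x, y, nboot=200, dist='ks', procs=1)
#   # h['h0'] is True when the Gaussian-copula hypothesis is NOT rejected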
def sample_d(i, cov_hat, cov_hat_inv, dist, N):
y_sampled = \
np.random.multivariate_normal(mean=[0., 0.],
cov=cov_hat, size=N)
d = dist_measure(y_sampled, cov_hat_inv, dist)
return d
def dist_measure(y_hat, cov_hat_inv, dist):
# gen z^2 RV which should be distributed according to a chi-squared
# distribution if h0 is true (Malevergne 2001)
z_hat_sqrd = test_z_vector(y_hat, cov_hat_inv)
# compute empirical CDF of z_hat_sqrd
F_z_x, F_z_y = ecdf(z_hat_sqrd)
# dof should be ndim (pp. 9 in Malevergrne 2001)
ndim = y_hat.shape[1]
chi2_frozen = chi2(df=ndim, loc=0., scale=1.0)
F_z_chi2 = chi2_frozen.cdf(z_hat_sqrd)
# order lowest to higest (enforce cdf monotone)
F_z_chi2_ = np.array([z_hat_sqrd, F_z_chi2]).T
sorted_F_chi2 = F_z_chi2_[F_z_chi2_[:, 0].argsort()]
F_chi2 = sorted_F_chi2[:, 1]
# check dims
assert len(F_z_y) == len(F_chi2)
# Kolmogorov-Smirnov distance
dist_map_dict = {'ks': 1, 'ks-avg': 2, 'ad': 3, 'ad-avg': 4}
dist_int = dist_map_dict[dist]
d = ks_ad_dist(F_z_y, F_chi2, dist_int)
return d
@jit(nopython=True)
def ks_ad_dist(F_z_y, F_chi2, dist=1):
d = 0.0
if dist == 1:
d = np.max(np.abs(F_z_y - F_chi2))
elif dist == 2:
# more robust to outliers
d = np.mean(np.abs(F_z_y - F_chi2))
else:
numer = np.abs(F_z_y - F_chi2)
denom = np.sqrt(F_chi2 * (1. - F_chi2))
if dist == 3:
d = np.max(numer / denom)
else:
# more robust to outliers
d = np.mean(numer / denom)
return d
@jit(nopython=True)
def test_z_vector(y_hat, cov_inv):
"""!
@brief Helper function for dist_measure
"""
z_hat_sqrd = np.zeros(y_hat.shape[0])
for k in range(y_hat.shape[0]):
for i in range(2):
for j in range(2):
z_hat_sqrd[k] += y_hat[:, i][k] * cov_inv[i, j] * y_hat[:, j][k]
return z_hat_sqrd
@jit(nopython=True)
def ecdf(x):
"""!
@brief Empirical cdf
@param x np_1darray
@return np_1darray empirical cdf
"""
xs = np.sort(x)
ys = np.arange(1, len(xs)+1)/float(len(xs))
return xs, ys
def ks2d2s(x1, y1, x2, y2, nboot=None):
"""!
@brief Two-dimensional Kolmogorov-Smirnov test on two samples.
@param x1 ndarray, shape (n1, )
@param y1 ndarray, shape (n1, )
@param x2 ndarray, shape (n2, )
@param y2 ndarray, shape (n2, )
@return tuple of floats (p-val, KS_stat)
Two-tailed p-value,
KS statistic
@note This is the two-sided K-S test. Small p-values means that the two
samples are significantly different. Note that the p-value is only an
approximation as the analytic distribution is unkonwn. The approximation is
accurate enough when N > ~20 and p-value < ~0.20 or so.
When p-value > 0.20 the value may not be accurate but it implies that the two
samples are not significantly different. (cf. Press 2007)
<NAME>. 1983, Two-Dimensional Goodness-of-Fit Testing in Astronomy,
Monthly Notices of the Royal Astronomical Society, vol. 202, pp. 615-627
<NAME>. and <NAME>. 1987, A Multidimensional Version of the
Kolmogorov-Smirnov Test, Monthly Notices of the Royal Astronomical Society,
vol. 225, pp. 155-170 Press, W.H. et al. 2007, Numerical Recipes, section
14.8
"""
assert (len(x1) == len(y1)) and (len(x2) == len(y2))
n1, n2 = len(x1), len(x2)
D = avgmaxdist(x1, y1, x2, y2)
if nboot is None:
sqen = np.sqrt(n1 * n2 / (n1 + n2))
r1 = pearsonr(x1, y1)[0]
r2 = pearsonr(x2, y2)[0]
r = np.sqrt(1 - 0.5 * (r1**2 + r2**2))
d = D * sqen / (1 + r * (0.25 - 0.75 / sqen))
p = kstwobign.sf(d)
else:
n = n1 + n2
x = np.concatenate([x1, x2])
y = np.concatenate([y1, y2])
d = np.empty(nboot, 'f')
for i in range(nboot):
idx = random.choice(n, n, replace=True)
ix1, ix2 = idx[:n1], idx[n1:]
#ix1 = random.choice(n, n1, replace=True)
#ix2 = random.choice(n, n2, replace=True)
d[i] = avgmaxdist(x[ix1], y[ix1], x[ix2], y[ix2])
p = np.sum(d > D).astype('f') / nboot
return p, D
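# Usage sketch (hypothetical samples):
#   x1, y1 = np.random.randn(200), np.random.randn(200)
#   x2, y2 = np.random.randn(300) + 0.5, np.random.randn(300)
#   p, D = ks2d2s(x1, y1, x2, y2)                # analytic approximation
#   p_b, _ = ks2d2s(x1, y1, x2, y2, nboot=1000)  # bootstrap p-value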
def avgmaxdist(x1, y1, x2, y2):
D1 = maxdist(x1, y1, x2, y2)
D2 = maxdist(x2, y2, x1, y1)
return (D1 + D2) / 2
@jit(nopython=True)
def maxdist(x1, y1, x2, y2):
n1 = len(x1)
D1 = np.empty((n1, 4))
for i in range(n1):
a1, b1, c1, d1 = quadct(x1[i], y1[i], x1, y1)
a2, b2, c2, d2 = quadct(x1[i], y1[i], x2, y2)
D1[i] = [a1 - a2, b1 - b2, c1 - c2, d1 - d2]
# re-assign the point to maximize difference,
# the discrepancy is significant for N < ~50
D1[:, 0] -= 1 / n1
dmin, dmax = -D1.min(), D1.max() + 1 / n1
return max(dmin, dmax)
@jit(nopython=True)
def quadct(x, y, xx, yy):
n = len(xx)
ix1, ix2 = xx <= x, yy <= y
a = np.sum(ix1 & ix2) / n
b = np.sum(ix1 & ~ix2) / n
    c = np.sum(~ix1 & ix2) / n
    d = 1.0 - a - b - c
    return a, b, c, d
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#<NAME> cs528 Final Project
#Simulation of gas particles with partial wall
#Simulation starts by putting all particles on the left side of a wall.
#The wall is removed midway through? At start?
import numpy
import matplotlib.pyplot as plt
import random
import math
import os
def main(args):
print("Hi!")
print("Clearing old plots...")
plotsFolder = "../frames/"
videoFolder = "../video/"
os.system("rm " + plotsFolder + "*.png")
os.system("rm " + videoFolder + "output*")
print("Done!")
numpy.random.seed(3)
# ~ xlow = -10000
# ~ xhigh = 10000
# ~ ylow = -10000
# ~ yhigh = 10000
# ~ xlow = -1000
# ~ xhigh = 1000
# ~ ylow = -1000
# ~ yhigh = 1000
xlow = -100
xhigh = 100
ylow = -100
yhigh = 100
# ~ xlow = -50
# ~ xhigh = 50
# ~ ylow = -100
# ~ yhigh = 100
# ~ xlow = -10
# ~ xhigh = 10
# ~ ylow = -10
# ~ yhigh = 10
# ~ xlow = -4
# ~ xhigh = 4
# ~ ylow = -4
# ~ yhigh = 4
xsize = abs( xhigh - xlow )
ysize = abs( yhigh - ylow )
# ~ numpoints = 10000
numpoints = 5000 #This is good one
# ~ numpoints = 1000
# ~ numpoints = 100
# ~ numpoints = 10
# ~ numSteps = 100000
# ~ numSteps = 50000
numSteps = 30000
numFrames = 30
stepSizeGraphing = numSteps // numFrames + 1
#Get xhigh up to wall
wallstop = getxhighAbutment(xsize, xlow)
# ~ print("wallstop: " + str(wallstop) + "\txlow: " + str(xlow) + "\txsize: " + str(xsize))
#Create array to hold x and y positions of particles
xpositions = numpy.random.randint(xlow, wallstop, numpoints)
ypositions = numpy.random.randint(ylow, yhigh + 1, numpoints)
xwallArray, ywallArray = createWallArray(xsize, ysize, xlow, xhigh, ylow, yhigh)
print("Simulating...")
simulate(numSteps, xpositions, ypositions, xwallArray, ywallArray, xlow, xhigh, ylow, yhigh, ysize, stepSizeGraphing, plotsFolder)
print("Done")
print("Creating videos...")
createAnimations(plotsFolder, videoFolder)
print("Done!")
return 0
#Gets position of the left side of wall
def getxhighAbutment(xsize, xlow):
wallxpos = xsize // 2
#if xsize even, sub 1
if xsize % 2 == 0:
wallxpos -= 1
return wallxpos + xlow
#Create array that holds the position of wall obstacles.
#Wall obstacles should always be at least 2x2 points!!!!!!!!!!!!!!!!
#Ones are walls, zeroes are spaces.
#There is a wall going down the middle with a hole in it in this version.
def createWallArray(xsize, ysize, xlow, xhigh, ylow, yhigh):
# ~ empty = []
# ~ xwallArray = numpy.array(empty)
# ~ ywallArray = numpy.array(empty)
xwallArray = []
ywallArray = []
wallxpos = getxhighAbutment(xsize, xlow)
# ~ print("wallxpos: " + str(wallxpos))
# ~ yFirstThird = (ysize // 3) + ylow
# ~ ySecondThird = ((ysize * 2) // 3) + ylow
yFirstThird, ySecondThird = yPartitions(ysize, ylow)
for i in range(ylow, yhigh + 1):
if i <= yFirstThird or i >= ySecondThird:
# ~ print("ysize // 3 = " + str(ysize // 3) + "\ti: " + str(i) + "\ti < (ysize // 3): " + str(i < (ysize // 3)) + "\ti > ((2 * ysize) // 3): " + str(i > ((2 * ysize) // 3)))
xwallArray.append(wallxpos)
ywallArray.append(i)
xwallArray.append(wallxpos + 1)
ywallArray.append(i)
# ~ return xwallArray, ywallArray
return numpy.array(xwallArray), numpy.array(ywallArray)
#Returns the y partition points for the wall.
def yPartitions(ysize, ylow):
# ~ yFirstThird = (ysize // 3) + ylow
# ~ ySecondThird = ((ysize * 2) // 3) + ylow
# ~ yFirstThird = ((48 * ysize) // 100) + ylow
# ~ ySecondThird = ((ysize * 52) // 100) + ylow
yFirstThird = ((0 * ysize) // 100) + ylow
ySecondThird = ((ysize * 10) // 100) + ylow
return yFirstThird, ySecondThird
#runs the simulation for some number of steps.
def simulate(numSteps, xpositions, ypositions, xwallArray, ywallArray, xlow, xhigh, ylow, yhigh, ysize, stepSizeGraph, plotsFolder):
#print initial condition
printSimulation(xpositions, ypositions, xwallArray, ywallArray, xlow, xhigh, ylow, yhigh, 0, plotsFolder)
movesArray = numpy.random.randint(1, 5, size=(numSteps, len(xpositions)))
NORTH = 1
EAST = 2
SOUTH = 3
WEST = 4
for step in range(1, numSteps):
#random walk the arrays.
xpositions -= numpy.where(movesArray[step] == WEST, 1, 0)
xpositions += numpy.where(movesArray[step] == EAST, 1, 0)
ypositions += numpy.where(movesArray[step] == NORTH, 1, 0)
        ypositions -= numpy.where(movesArray[step] == SOUTH, 1, 0)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hamiltonians that are quadratic in the fermionic ladder operators."""
import warnings
import numpy
from scipy.linalg import schur
from openfermion.ops import PolynomialTensor
from openfermion.ops._givens_rotations import (
fermionic_gaussian_decomposition,
givens_decomposition_square,
swap_columns,
swap_rows)
class QuadraticHamiltonianError(Exception):
pass
class QuadraticHamiltonian(PolynomialTensor):
r"""Class for storing Hamiltonians that are quadratic in the fermionic
ladder operators. The operators stored in this class take the form
.. math::
\sum_{p, q} (M_{pq} - \mu \delta_{pq}) a^\dagger_p a_q
+ \frac12 \sum_{p, q}
(\Delta_{pq} a^\dagger_p a^\dagger_q + \text{h.c.})
+ \text{constant}
where
- :math:`M` is a Hermitian `n_qubits` x `n_qubits` matrix.
- :math:`\Delta` is an antisymmetric `n_qubits` x `n_qubits` matrix.
- :math:`\mu` is a real number representing the chemical potential.
- :math:`\delta_{pq}` is the Kronecker delta symbol.
We separate the chemical potential :math:`\mu` from :math:`M` so that
we can use it to adjust the expectation value of the total number of
particles.
Attributes:
chemical_potential(float): The chemical potential :math:`\mu`.
"""
def __init__(self, hermitian_part, antisymmetric_part=None,
constant=0.0, chemical_potential=0.0):
r"""
Initialize the QuadraticHamiltonian class.
Args:
hermitian_part(ndarray): The matrix :math:`M`, which represents the
coefficients of the particle-number-conserving terms.
This is an `n_qubits` x `n_qubits` numpy array of complex
numbers.
antisymmetric_part(ndarray): The matrix :math:`\Delta`,
which represents the coefficients of the
non-particle-number-conserving terms.
This is an `n_qubits` x `n_qubits` numpy array of complex
numbers.
constant(float, optional): A constant term in the operator.
chemical_potential(float, optional): The chemical potential
:math:`\mu`.
"""
n_qubits = hermitian_part.shape[0]
# Initialize combined Hermitian part
if not chemical_potential:
combined_hermitian_part = hermitian_part
else:
combined_hermitian_part = (
hermitian_part - chemical_potential * numpy.eye(n_qubits))
# Initialize the PolynomialTensor
if antisymmetric_part is None:
super(QuadraticHamiltonian, self).__init__(
{(): constant, (1, 0): combined_hermitian_part})
else:
super(QuadraticHamiltonian, self).__init__(
{(): constant, (1, 0): combined_hermitian_part,
(1, 1): 0.5 * antisymmetric_part,
(0, 0): -0.5 * antisymmetric_part.conj()})
# Add remaining attributes
self.chemical_potential = chemical_potential
@property
def combined_hermitian_part(self):
"""The Hermitian part including the chemical potential."""
return self.n_body_tensors[1, 0]
@property
def antisymmetric_part(self):
"""The antisymmetric part."""
if (1, 1) in self.n_body_tensors:
return 2. * self.n_body_tensors[1, 1]
else:
return numpy.zeros((self.n_qubits, self.n_qubits), complex)
@property
def hermitian_part(self):
"""The Hermitian part not including the chemical potential."""
return (self.combined_hermitian_part +
self.chemical_potential * numpy.eye(self.n_qubits))
@property
def conserves_particle_number(self):
"""Whether this Hamiltonian conserves particle number."""
discrepancy = numpy.max(numpy.abs(self.antisymmetric_part))
return numpy.isclose(discrepancy, 0.0)
def add_chemical_potential(self, chemical_potential):
"""Increase (or decrease) the chemical potential by some value."""
self.n_body_tensors[1, 0] -= (chemical_potential *
numpy.eye(self.n_qubits))
self.chemical_potential += chemical_potential
def ground_energy(self):
"""Return the ground energy."""
orbital_energies, _, constant = (
self.diagonalizing_bogoliubov_transform())
return numpy.sum(orbital_energies[
numpy.where(orbital_energies < 0.0)[0]]) + constant
def majorana_form(self):
r"""Return the Majorana represention of the Hamiltonian.
Any quadratic Hamiltonian can be written in the form
.. math::
\frac{i}{2} \sum_{j, k} A_{jk} f_j f_k + \text{constant}
where the :math:`f_i` are normalized Majorana fermion operators:
.. math::
f_j = \frac{1}{\sqrt{2}} (a^\dagger_j + a_j)
f_{j + N} = \frac{i}{\sqrt{2}} (a^\dagger_j - a_j)
and :math:`A` is a (2 * `n_qubits`) x (2 * `n_qubits`) real
antisymmetric matrix. This function returns the matrix
:math:`A` and the constant.
"""
hermitian_part = self.combined_hermitian_part
antisymmetric_part = self.antisymmetric_part
# Compute the Majorana matrix using block matrix manipulations
majorana_matrix = numpy.zeros((2 * self.n_qubits, 2 * self.n_qubits))
# Set upper left block
majorana_matrix[:self.n_qubits, :self.n_qubits] = numpy.real(-0.5j * (
hermitian_part - hermitian_part.conj() +
antisymmetric_part - antisymmetric_part.conj()))
# Set upper right block
majorana_matrix[:self.n_qubits, self.n_qubits:] = numpy.real(0.5 * (
hermitian_part + hermitian_part.conj() -
antisymmetric_part - antisymmetric_part.conj()))
# Set lower left block
majorana_matrix[self.n_qubits:, :self.n_qubits] = numpy.real(-0.5 * (
hermitian_part + hermitian_part.conj() +
antisymmetric_part + antisymmetric_part.conj()))
# Set lower right block
majorana_matrix[self.n_qubits:, self.n_qubits:] = numpy.real(-0.5j * (
hermitian_part - hermitian_part.conj() -
antisymmetric_part + antisymmetric_part.conj()))
# Compute the constant
majorana_constant = (0.5 * numpy.real(numpy.trace(hermitian_part)) +
self.n_body_tensors[()])
return majorana_matrix, majorana_constant
def diagonalizing_bogoliubov_transform(self, spin_sector=None):
r"""Compute the unitary that diagonalizes a quadratic Hamiltonian.
Any quadratic Hamiltonian can be rewritten in the form
.. math::
\sum_{j} \varepsilon_j b^\dagger_j b_j + \text{constant},
where the :math:`b^\dagger_j` are a new set fermionic creation
operators that satisfy the canonical anticommutation relations.
The new creation operators are linear combinations of the
original ladder operators. In the most general case, creation and
annihilation operators are mixed together:
.. math::
\begin{pmatrix}
b^\dagger_1 \\
\vdots \\
b^\dagger_N \\
\end{pmatrix}
= W
\begin{pmatrix}
a^\dagger_1 \\
\vdots \\
a^\dagger_N \\
a_1 \\
\vdots \\
a_N
\end{pmatrix},
where :math:`W` is an :math:`N \times (2N)` matrix.
However, if the Hamiltonian conserves particle number then
creation operators don't need to be mixed with annihilation operators
and :math:`W` only needs to be an :math:`N \times N` matrix:
.. math::
\begin{pmatrix}
b^\dagger_1 \\
\vdots \\
b^\dagger_N \\
\end{pmatrix}
= W
\begin{pmatrix}
a^\dagger_1 \\
\vdots \\
a^\dagger_N \\
\end{pmatrix},
This method returns the matrix :math:`W`.
Args:
spin_sector (optional str): An optional integer specifying
a spin sector to restrict to: 0 for spin-up and 1 for
spin-down. Should only be specified if the Hamiltonian
includes a spin degree of freedom and spin-up modes
do not interact with spin-down modes. If specified,
the modes are assumed to be ordered so that spin-up orbitals
come before spin-down orbitals.
Returns:
orbital_energies(ndarray)
A one-dimensional array containing the :math:`\varepsilon_j`
diagonalizing_unitary (ndarray):
A matrix representing the transformation :math:`W` of the
fermionic ladder operators. If the Hamiltonian conserves
particle number then this is :math:`N \times N`; otherwise
it is :math:`N \times 2N`. If spin sector is specified,
then `N` here represents the number of spatial orbitals
rather than spin orbitals.
constant(float)
The constant
"""
n_modes = self.combined_hermitian_part.shape[0]
if spin_sector is not None and n_modes % 2:
raise ValueError(
'Spin sector was specified but Hamiltonian contains '
'an odd number of modes'
)
if self.conserves_particle_number:
return self._particle_conserving_bogoliubov_transform(spin_sector)
else:
# TODO implement this
if spin_sector is not None:
raise NotImplementedError(
'Specifying spin sector for non-particle-conserving '
'Hamiltonians is not yet supported.'
)
return self._non_particle_conserving_bogoliubov_transform(
spin_sector)
def _particle_conserving_bogoliubov_transform(self, spin_sector):
n_modes = self.combined_hermitian_part.shape[0]
if spin_sector is not None:
n_sites = n_modes // 2
def index_map(i):
return i + spin_sector*n_sites
spin_indices = [index_map(i) for i in range(n_sites)]
matrix = self.combined_hermitian_part[
numpy.ix_(spin_indices, spin_indices)]
orbital_energies, diagonalizing_unitary_T = numpy.linalg.eigh(
matrix)
else:
matrix = self.combined_hermitian_part
if _is_spin_block_diagonal(matrix):
up_block = matrix[:n_modes//2, :n_modes//2]
down_block = matrix[n_modes//2:, n_modes//2:]
up_orbital_energies, up_diagonalizing_unitary_T = (
numpy.linalg.eigh(up_block))
down_orbital_energies, down_diagonalizing_unitary_T = (
numpy.linalg.eigh(down_block))
orbital_energies = numpy.concatenate(
(up_orbital_energies, down_orbital_energies))
diagonalizing_unitary_T = numpy.block([
                    [up_diagonalizing_unitary_T,
                     numpy.zeros((n_modes // 2, n_modes // 2))],
                    [numpy.zeros((n_modes // 2, n_modes // 2)),
                     down_diagonalizing_unitary_T]])
"""process mask tools
method:
convert_one_hot
extract_bbox
dilation_mask
erosion_mask
remove_small_connected_object
extract_largest_connected_object
keep_KthLargest_connected_object
smooth_mask
extract_left_right_bbox
"""
import numpy as np
from skimage import measure
from skimage.morphology import label
from scipy.ndimage.morphology import generate_binary_structure, binary_closing, \
binary_erosion, binary_dilation
def convert_one_hot(mask, s_idx, num_classes):
"""Convert mask label into one hot coding."""
masks = []
for i_label in range(s_idx, num_classes + s_idx):
mask_i = mask == i_label
masks.append(mask_i)
mask_czyx = np.stack(masks, axis=0)
mask_czyx = mask_czyx.astype(np.float32)
return mask_czyx
def convert_ribCenterline_one_hot(mask, s_idx, num_classes):
"""Convert rib and centerline mask into one hot coding."""
masks = []
for i_label in range(s_idx, num_classes + s_idx):
mask_i = mask.copy()
if i_label == 1:
mask_i[mask_i != 0] = 1
else:
mask_i[mask_i != i_label] = 0
mask_i[mask_i == i_label] = 1
masks.append(mask_i)
mask_czyx = np.stack(masks, axis=0)
mask_czyx = mask_czyx.astype(np.float32)
return mask_czyx
def extract_bbox(mask):
"""extract object bbox"""
t_mask = mask > 0
zz, yy, xx = np.where(t_mask)
bbox = np.array([[np.min(zz), np.max(zz)], [np.min(yy), np.max(yy)],
                     [np.min(xx), np.max(xx)]])
    return bbox
import os
import sys
import yaml
import cv2
import numpy as np
import datetime
import copy
import transforms3d as tf3d
import time
import random
import json
import open3d
import math
from pathlib import Path
from misc import manipulate_RGB, toPix_array, toPix, calculate_feature_visibility
# Import bop_renderer and bop_toolkit.
# ------------------------------------------------------------------------------
bop_renderer_path = '/home/stefan/bop_renderer/build'
sys.path.append(bop_renderer_path)
import bop_renderer
def lookAt(eye, target, up):
# eye is from
# target is to
# expects numpy arrays
f = eye - target
f = f/np.linalg.norm(f)
s = np.cross(up, f)
s = s/np.linalg.norm(s)
    u = np.cross(f, s)
# -*- coding: utf-8 -*-
##
# \file plot_impedance.py
# \title Show the real and imaginary parts of the surface impedance.
# \author <NAME>
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 17 Oct.
##
import numpy as np
from scipy import special as sp
import os
import site
from functools import reduce  # reduce is not a builtin on Python 3
from matplotlib import pyplot as plt
base_path = reduce(lambda l, r: l + os.path.sep + r,
                   os.path.dirname(os.path.realpath(__file__)).split(os.path.sep))
tools_path = os.path.join(base_path.rsplit(os.sep, 1)[0],'tools')
site.addsitedir(tools_path)
from miki_imp_model import Miki
from get_imped_coefts import get_coefts_Miki
def plot_surface_imp(sigma, rho, c, f, f_max_src):
"""
:param sigma: specific airflow resistivity, float (kNm-4s==CGS).
:param rho: air density, float (kg.m-3).
:param c: sound speed, scalar (m.s-1).
:param f: frequency sequence, 1d list of floats (Hz).
:param f_max_src: maximal frequency, float (Hz)
:return: the real and imaginary parts of the surface impedance.
"""
#==============================================================================
# Check the impedance coefficients (between Miki's model and the coefficient fit)
#==============================================================================
omega = 2.*np.pi*f
k = omega/c
Zg, k_miki = Miki(-1,f,sigma,rho,c)
K = 6
a_k, gamma_k, a_k_ncor= get_coefts_Miki(K, sigma)
am = 5.50
bm = -0.632
mu = (am/((2.*np.pi*sigma)**bm))/np.sin(((bm +1.)*np.pi)/2.)
sum_k = np.zeros((len(omega)),dtype=np.complex128)
for n in range(K):
sum_k += a_k_ncor[n] / (gamma_k[n]- 1j*omega) # scalar
    Zomega = rho*c*(1. + (mu/sp.gamma(-bm))*sum_k)
plt.figure('Surface impedance')
plt.semilogx(f, np.real(Zg/(rho*c)), 'k-', lw=2)
plt.semilogx(f, np.imag(Zg/(rho*c)), 'g-', lw=2)
plt.semilogx(f, | np.real(Zomega/(rho*c)) | numpy.real |
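In formula form, the loop above assembles the partial-fraction approximation of
Miki's surface impedance that the plot compares against the direct model (the
a_k here are the a_k_ncor coefficients returned by get_coefts_Miki):

    Z(\omega) = \rho c \Big( 1 + \frac{\mu}{\Gamma(-b_m)}
                \sum_{k=1}^{K} \frac{a_k}{\gamma_k - i\omega} \Big),
    \qquad
    \mu = \frac{a_m (2\pi\sigma)^{-b_m}}{\sin\big((b_m + 1)\pi / 2\big)}

with a_m = 5.50 and b_m = -0.632 as hard-coded above.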
'''Logger that stacks 1-d sample arrays into N-d matrices from the device and the Qualisys objects.'''
import numpy as np
from datetime import datetime as datetime
from time import time
from utils_mpc import quaternionToRPY
class LoggerControl():
def __init__(self, dt, N0_gait, joystick=None, estimator=None, loop=None, gait=None, statePlanner=None,
footstepPlanner=None, footTrajectoryGenerator=None, logSize=60e3, ringBuffer=False):
self.ringBuffer = ringBuffer
logSize = np.int(logSize)
self.logSize = logSize
self.i = 0
self.dt = dt
# Allocate the data:
# Joystick
self.joy_v_ref = np.zeros([logSize, 6]) # reference velocity of the joystick
# Estimator
self.esti_feet_status = np.zeros([logSize, 4]) # input feet status (contact or not)
self.esti_feet_goals = np.zeros([logSize, 3, 4]) # input feet goals (desired on the ground)
self.esti_q_filt = np.zeros([logSize, 19]) # output position
self.esti_v_filt = np.zeros([logSize, 18]) # output velocity
self.esti_v_secu = np.zeros([logSize, 12]) # filtered output velocity for security check
self.esti_FK_lin_vel = np.zeros([logSize, 3]) # estimated velocity of the base with FK
self.esti_FK_xyz = np.zeros([logSize, 3]) # estimated position of the base with FK
self.esti_xyz_mean_feet = np.zeros([logSize, 3]) # average of feet goals
self.esti_filt_lin_vel = np.zeros([logSize, 3]) # estimated velocity of the base before low pass filter
self.esti_HP_x = np.zeros([logSize, 3]) # x input of the velocity complementary filter
self.esti_HP_dx = np.zeros([logSize, 3]) # dx input of the velocity complementary filter
self.esti_HP_alpha = np.zeros([logSize, 3]) # alpha parameter of the velocity complementary filter
self.esti_HP_filt_x = np.zeros([logSize, 3]) # filtered output of the velocity complementary filter
self.esti_LP_x = np.zeros([logSize, 3]) # x input of the position complementary filter
self.esti_LP_dx = np.zeros([logSize, 3]) # dx input of the position complementary filter
self.esti_LP_alpha = np.zeros([logSize, 3]) # alpha parameter of the position complementary filter
self.esti_LP_filt_x = np.zeros([logSize, 3]) # filtered output of the position complementary filter
self.esti_kf_X = np.zeros([logSize, 18]) # state of the Kalman filter
self.esti_kf_Z = np.zeros([logSize, 16]) # measurement for the Kalman filter
# Loop
self.loop_o_q_int = np.zeros([logSize, 19]) # position in world frame (esti_q_filt + dt * loop_o_v)
self.loop_o_v = np.zeros([logSize, 18]) # estimated velocity in world frame
self.loop_h_v = np.zeros([logSize, 18]) # estimated velocity in horizontal frame
self.loop_pos_virtual_world = np.zeros([logSize, 3]) # x, y, yaw perfect position in world
# Gait
self.planner_gait = np.zeros([logSize, N0_gait, 4]) # Gait sequence
self.planner_is_static = np.zeros([logSize]) # if the planner is in static mode or not
self.planner_q_static = np.zeros([logSize, 19]) # position in static mode (4 stance phase)
self.planner_RPY_static = np.zeros([logSize, 3]) # RPY orientation in static mode (4 stance phase)
# State planner
if statePlanner is not None:
self.planner_xref = np.zeros([logSize, 12, 1+statePlanner.getNSteps()]) # Reference trajectory
# Footstep planner
if gait is not None:
self.planner_fsteps = np.zeros([logSize, gait.getCurrentGait().shape[0], 12]) # Reference footsteps position
self.planner_h_ref = np.zeros([logSize]) # reference height of the planner
# Foot Trajectory Generator
self.planner_goals = np.zeros([logSize, 3, 4]) # 3D target feet positions
self.planner_vgoals = np.zeros([logSize, 3, 4]) # 3D target feet velocities
self.planner_agoals = np.zeros([logSize, 3, 4]) # 3D target feet accelerations
# Model Predictive Control
# output vector of the MPC (next state + reference contact force)
if statePlanner is not None:
self.mpc_x_f = np.zeros([logSize, 24, statePlanner.getNSteps()])
# Whole body control
self.wbc_x_f = np.zeros([logSize, 24]) # input vector of the WBC (next state + reference contact force)
self.wbc_P = np.zeros([logSize, 12]) # proportionnal gains of the PD+
self.wbc_D = np.zeros([logSize, 12]) # derivative gains of the PD+
self.wbc_q_des = np.zeros([logSize, 12]) # desired position of actuators
self.wbc_v_des = np.zeros([logSize, 12]) # desired velocity of actuators
self.wbc_tau_ff = np.zeros([logSize, 12]) # feedforward torques computed by the WBC
self.wbc_f_ctc = np.zeros([logSize, 12]) # contact forces computed by the WBC
self.wbc_feet_pos = np.zeros([logSize, 3, 4]) # current feet positions according to WBC
self.wbc_feet_pos_target = np.zeros([logSize, 3, 4]) # current feet positions targets for WBC
self.wbc_feet_err = np.zeros([logSize, 3, 4]) # error between feet positions and their reference
self.wbc_feet_vel = np.zeros([logSize, 3, 4]) # current feet velocities according to WBC
self.wbc_feet_vel_target = np.zeros([logSize, 3, 4]) # current feet velocities targets for WBC
self.wbc_feet_acc_target = np.zeros([logSize, 3, 4]) # current feet accelerations targets for WBC
self.wbc_feet_pos_invkin = np.zeros([logSize, 3, 4]) # current feet positions according to InvKin
self.wbc_feet_vel_invkin = np.zeros([logSize, 3, 4]) # current feet velocities according to InvKin
# Timestamps
self.tstamps = np.zeros(logSize)
def sample(self, joystick, estimator, loop, gait, statePlanner, footstepPlanner, footTrajectoryGenerator, wbc):
if (self.i >= self.logSize):
if self.ringBuffer:
self.i = 0
else:
return
# Logging from joystick
self.joy_v_ref[self.i] = joystick.v_ref[:, 0]
# Logging from estimator
self.esti_feet_status[self.i] = estimator.feet_status[:]
self.esti_feet_goals[self.i] = estimator.feet_goals
self.esti_q_filt[self.i] = estimator.q_filt[:, 0]
self.esti_v_filt[self.i] = estimator.v_filt[:, 0]
self.esti_v_secu[self.i] = estimator.v_secu[:]
self.esti_FK_lin_vel[self.i] = estimator.FK_lin_vel[:]
self.esti_FK_xyz[self.i] = estimator.FK_xyz[:]
self.esti_xyz_mean_feet[self.i] = estimator.xyz_mean_feet[:]
self.esti_filt_lin_vel[self.i] = estimator.filt_lin_vel[:]
if not estimator.kf_enabled:
self.esti_HP_x[self.i] = estimator.filter_xyz_vel.x
self.esti_HP_dx[self.i] = estimator.filter_xyz_vel.dx
self.esti_HP_alpha[self.i] = estimator.filter_xyz_vel.alpha
self.esti_HP_filt_x[self.i] = estimator.filter_xyz_vel.filt_x
self.esti_LP_x[self.i] = estimator.filter_xyz_pos.x
self.esti_LP_dx[self.i] = estimator.filter_xyz_pos.dx
self.esti_LP_alpha[self.i] = estimator.filter_xyz_pos.alpha
self.esti_LP_filt_x[self.i] = estimator.filter_xyz_pos.filt_x
else:
self.esti_kf_X[self.i] = estimator.kf.X[:, 0]
self.esti_kf_Z[self.i] = estimator.Z[:, 0]
# Logging from the main loop
self.loop_o_q_int[self.i] = loop.q[:, 0]
self.loop_o_v[self.i] = loop.v[:, 0]
self.loop_h_v[self.i] = loop.h_v[:, 0]
self.loop_pos_virtual_world[self.i] = np.array([loop.q[0, 0], loop.q[1, 0], loop.yaw_estim])
# Logging from the planner
# self.planner_q_static[self.i] = planner.q_static[:]
# self.planner_RPY_static[self.i] = planner.RPY_static[:, 0]
self.planner_xref[self.i] = statePlanner.getReferenceStates()
self.planner_fsteps[self.i] = footstepPlanner.getFootsteps()
self.planner_gait[self.i] = gait.getCurrentGait()
self.planner_goals[self.i] = footTrajectoryGenerator.getFootPosition()
self.planner_vgoals[self.i] = footTrajectoryGenerator.getFootVelocity()
self.planner_agoals[self.i] = footTrajectoryGenerator.getFootAcceleration()
self.planner_is_static[self.i] = gait.getIsStatic()
self.planner_h_ref[self.i] = loop.h_ref
# Logging from model predictive control
self.mpc_x_f[self.i] = loop.x_f_mpc
# Logging from whole body control
self.wbc_x_f[self.i] = loop.x_f_wbc
self.wbc_P[self.i] = loop.result.P
self.wbc_D[self.i] = loop.result.D
self.wbc_q_des[self.i] = loop.result.q_des
self.wbc_v_des[self.i] = loop.result.v_des
self.wbc_tau_ff[self.i] = loop.result.tau_ff
self.wbc_f_ctc[self.i] = wbc.f_with_delta[:, 0]
self.wbc_feet_pos[self.i] = wbc.feet_pos
self.wbc_feet_pos_target[self.i] = wbc.log_feet_pos_target[:, :, self.i+1]
self.wbc_feet_err[self.i] = wbc.feet_err
self.wbc_feet_vel[self.i] = wbc.feet_vel
self.wbc_feet_vel_target[self.i] = wbc.log_feet_vel_target[:, :, self.i+1]
self.wbc_feet_acc_target[self.i] = wbc.log_feet_acc_target[:, :, self.i+1]
self.wbc_feet_pos_invkin[self.i] = wbc.invKin.cpp_posf.transpose()
self.wbc_feet_vel_invkin[self.i] = wbc.invKin.cpp_vf.transpose()
# Logging timestamp
self.tstamps[self.i] = time()
self.i += 1
def processMocap(self, N, loggerSensors):
self.mocap_b_v = np.zeros([N, 3])
self.mocap_b_w = np.zeros([N, 3])
self.mocap_RPY = np.zeros([N, 3])
for i in range(N):
oRb = loggerSensors.mocapOrientationMat9[i]
"""from IPython import embed
embed()"""
self.mocap_b_v[i] = (oRb.transpose() @ loggerSensors.mocapVelocity[i].reshape((3, 1))).ravel()
self.mocap_b_w[i] = (oRb.transpose() @ loggerSensors.mocapAngularVelocity[i].reshape((3, 1))).ravel()
self.mocap_RPY[i] = quaternionToRPY(loggerSensors.mocapOrientationQuat[i])[:, 0]
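    # In symbols, with oRb the base-to-world rotation from the mocap, the two
    # matrix products above are the change of frame
    #     {}^b v = (oRb)^T {}^o v,   {}^b w = (oRb)^T {}^o w
    # i.e. world-frame linear and angular velocities expressed in the base frame.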
def plotAll(self, loggerSensors):
from matplotlib import pyplot as plt
N = self.tstamps.shape[0]
t_range = np.array([k*self.dt for k in range(N)])
self.processMocap(N, loggerSensors)
index6 = [1, 3, 5, 2, 4, 6]
index12 = [1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12]
"""plt.figure()
for i in range(4):
if i == 0:
ax0 = plt.subplot(2, 2, i+1)
else:
plt.subplot(2, 2, i+1, sharex=ax0)
switch = np.diff(self.esti_feet_status[:, i])
tmp = self.wbc_feet_pos[:-1, 2, i]
tmp_y = tmp[switch > 0]
tmp_x = t_range[:-1]
tmp_x = tmp_x[switch > 0]
plt.plot(tmp_x, tmp_y, linewidth=3)"""
lgd_X = ["FL", "FR", "HL", "HR"]
lgd_Y = ["Pos X", "Pos Y", "Pos Z"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
plt.plot(t_range, self.wbc_feet_pos[:, i % 3, np.int(i/3)], color='b', linewidth=3, marker='')
plt.plot(t_range, self.wbc_feet_err[:, i % 3, np.int(i/3)] + self.wbc_feet_pos[0, i % 3, np.int(i/3)], color='g', linewidth=3, marker='')
plt.plot(t_range, self.wbc_feet_pos_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
"""plt.plot(t_range, self.wbc_feet_pos_invkin[:, i % 3, np.int(i/3)],
color='darkviolet', linewidth=3, linestyle="--", marker='')"""
if (i % 3) == 2:
mini = np.min(self.wbc_feet_pos[:, i % 3, np.int(i/3)])
maxi = np.max(self.wbc_feet_pos[:, i % 3, np.int(i/3)])
plt.plot(t_range, self.planner_gait[:, 0, np.int(
i/3)] * (maxi - mini) + mini, color='k', linewidth=3, marker='')
plt.legend([lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)]+"", "error",
lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)]+" Ref", "Contact state"], prop={'size': 8})
plt.suptitle("Measured & Reference feet positions (base frame)")
lgd_X = ["FL", "FR", "HL", "HR"]
lgd_Y = ["Vel X", "Vel Y", "Vel Z"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
plt.plot(t_range, self.wbc_feet_vel[:, i % 3, np.int(i/3)], color='b', linewidth=3, marker='')
plt.plot(t_range, self.wbc_feet_vel_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
"""plt.plot(t_range, self.wbc_feet_vel_invkin[:, i % 3, np.int(i/3)],
color='darkviolet', linewidth=3, linestyle="--", marker='')"""
plt.legend([lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)], lgd_Y[i %
3] + " " + lgd_X[np.int(i/3)]+" Ref"], prop={'size': 8})
plt.suptitle("Measured and Reference feet velocities (base frame)")
lgd_X = ["FL", "FR", "HL", "HR"]
lgd_Y = ["Acc X", "Acc Y", "Acc Z"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
plt.plot(t_range, self.wbc_feet_acc_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
plt.legend([lgd_Y[i % 3] + " " + lgd_X[ | np.int(i/3) | numpy.int |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
"""
Functions to fit gaussian functions to the detected RNA spots, especially in
clustered regions.
"""
import warnings
import numpy as np
import bigfish.stack as stack
from .utils import get_sigma, get_radius
from scipy.special import erf
from scipy.optimize import curve_fit
from skimage.measure import regionprops
from skimage.measure import label
# ### Main function ###
def decompose_cluster(image, spots, voxel_size_z=None, voxel_size_yx=100,
psf_z=None, psf_yx=200, alpha=0.5, beta=1):
"""Detect potential regions with clustered spots and fit as many reference
spots as possible in these regions.
1) We estimate image background with a large gaussian filter. We then
remove the background from the original image to denoise it.
2) We build a reference spot by aggregating predetected spots.
3) We fit a gaussian function on the reference spots.
4) We detect potential clustered regions to decompose.
5) We simulate as many gaussians as possible in the candidate regions.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 3) or (nb_spots, 2)
for 3-d or 2-d images respectively.
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, image is
considered in 2-d.
# TODO Error returned with a float
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
psf_z : int or float or None
Theoretical size of the PSF emitted by a spot in the z plan,
in nanometer. If None, image is considered in 2-d.
psf_yx : int or float
Theoretical size of the PSF emitted by a spot in the yx plan,
in nanometer.
alpha : int or float
Intensity score of the reference spot, between 0 and 1. The higher,
the brighter are the spots fitted in the clusters. Consequently, a high
intensity score reduces the number of spots per cluster. Default is
0.5.
beta : int or float
Multiplicative factor for the intensity threshold of a cluster region.
Default is 1. Threshold is computed with the formula :
threshold = beta * max(median_spot)
Returns
-------
spots : np.ndarray, np.int64
Coordinate of the spots detected, with shape (nb_spots, 3) or
(nb_spots, 2). One coordinate per dimension (zyx or yx coordinates).
clusters : np.ndarray, np.int64
Array with shape (nb_cluster, 7) or (nb_cluster, 6). One coordinate
per dimension for the cluster centroid (zyx or yx coordinates), the
number of RNAs detected in the cluster, the area of the cluster
region, its average intensity value and its index.
reference_spot : np.ndarray
Reference spot in 3-d or 2-d.
"""
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_array(spots, ndim=2, dtype=np.int64)
stack.check_parameter(voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
psf_z=(int, float, type(None)),
psf_yx=(int, float),
alpha=(int, float),
beta=(int, float))
if alpha < 0 or alpha > 1:
raise ValueError("'alpha' should be a value between 0 and 1, not {0}"
.format(alpha))
if beta < 0:
raise ValueError("'beta' should be a positive value, not {0}"
.format(beta))
# check number of dimensions
ndim = image.ndim
if ndim == 3 and voxel_size_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'voxel_size_z' parameter is missing.".format(ndim))
if ndim == 3 and psf_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'psf_z' parameter is missing.".format(ndim))
if ndim != spots.shape[1]:
raise ValueError("Provided image has {0} dimensions but spots are "
"detected in {1} dimensions."
.format(ndim, spots.shape[1]))
if ndim == 2:
voxel_size_z, psf_z = None, None
# case where no spot were detected
if spots.size == 0:
cluster = np.array([], dtype=np.int64).reshape((0, ndim + 4))
reference_spot = np.zeros((5,) * ndim, dtype=image.dtype)
return spots, cluster, reference_spot
# compute expected standard deviation of the spots
sigma = get_sigma(voxel_size_z, voxel_size_yx, psf_z, psf_yx)
large_sigma = tuple([sigma_ * 5 for sigma_ in sigma])
# denoise the image
image_denoised = stack.remove_background_gaussian(
image,
sigma=large_sigma)
# build a reference median spot
reference_spot = build_reference_spot(
image_denoised,
spots,
voxel_size_z, voxel_size_yx, psf_z, psf_yx,
alpha)
# case with an empty frame as reference spot
if reference_spot.sum() == 0:
cluster = np.array([], dtype=np.int64).reshape((0, ndim + 4))
return spots, cluster, reference_spot
# fit a gaussian function on the reference spot to be able to simulate it
parameters_fitted = modelize_spot(
reference_spot, voxel_size_z, voxel_size_yx, psf_z, psf_yx)
if ndim == 3:
sigma_z, sigma_yx, amplitude, background = parameters_fitted
else:
sigma_z = None
sigma_yx, amplitude, background = parameters_fitted
# use connected components to detect potential clusters
cluster_regions, spots_out_cluster, cluster_size = get_clustered_region(
image_denoised,
spots,
voxel_size_z, voxel_size_yx, psf_z, psf_yx,
beta)
# case where no cluster where detected
if cluster_regions.size == 0:
cluster = np.array([], dtype=np.int64).reshape((0, ndim + 4))
return spots, cluster, reference_spot
# precompute gaussian function values
max_grid = max(200, cluster_size + 1)
precomputed_gaussian = precompute_erf(
voxel_size_z, voxel_size_yx, sigma_z, sigma_yx, max_grid=max_grid)
# fit gaussian mixtures in the cluster regions
spots_in_cluster, clusters = fit_gaussian_mixture(
image=image_denoised,
cluster_regions=cluster_regions,
voxel_size_z=voxel_size_z,
voxel_size_yx=voxel_size_yx,
sigma_z=sigma_z,
sigma_yx=sigma_yx,
amplitude=amplitude,
background=background,
precomputed_gaussian=precomputed_gaussian)
# normally the number of detected spots should increase
if len(spots_out_cluster) + len(spots_in_cluster) < len(spots):
warnings.warn("Problem occurs during the decomposition of clusters. "
"Less spots are detected after the decomposition than "
"before.",
UserWarning)
# merge outside and inside spots
spots = np.concatenate((spots_out_cluster, spots_in_cluster[:, :ndim]),
axis=0)
return spots, clusters, reference_spot
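# A minimal call sketch for the pipeline above (values are hypothetical; a real
# 3-d smFISH stack supplies its own voxel and PSF sizes in nanometers):
#     spots_post, clusters, ref_spot = decompose_cluster(
#         image, spots,
#         voxel_size_z=300, voxel_size_yx=103,
#         psf_z=350, psf_yx=150,
#         alpha=0.5, beta=1)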
# ### Reference spot ###
def build_reference_spot(image, spots, voxel_size_z=None, voxel_size_yx=100,
psf_z=None, psf_yx=200, alpha=0.5):
"""Build a median or mean spot in 3 or 2 dimensions as reference.
Reference spot is computed from a sample of uncropped detected spots. If
such sample is not possible, an empty frame is returned.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 3) for 3-d images or
(nb_spots, 2) for 2-d images.
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, image is
considered in 2-d.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
psf_z : int or float or None
Theoretical size of the PSF emitted by a spot in the z plan,
in nanometer. If None, image is considered in 2-d.
psf_yx : int or float
Theoretical size of the PSF emitted by a spot in the yx plan,
in nanometer.
alpha : int or float
Intensity score of the reference spot, between 0 and 1. If 0, reference
spot approximates the spot with the lowest intensity. If 1, reference
spot approximates the brightest spot. Default is 0.5.
Returns
-------
reference_spot : np.ndarray
Reference spot in 3-d or 2-d.
"""
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_array(spots, ndim=2, dtype=np.int64)
stack.check_parameter(voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
psf_z=(int, float, type(None)),
psf_yx=(int, float),
alpha=(int, float))
if alpha < 0 or alpha > 1:
raise ValueError("'alpha' should be a value between 0 and 1, not {0}"
.format(alpha))
# check number of dimensions
ndim = image.ndim
if ndim == 3 and voxel_size_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'voxel_size_z' parameter is missing.".format(ndim))
if ndim == 3 and psf_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'psf_z' parameter is missing.".format(ndim))
if ndim != spots.shape[1]:
raise ValueError("Provided image has {0} dimensions but spots are "
"detected in {1} dimensions."
.format(ndim, spots.shape[1]))
if ndim == 2:
voxel_size_z, psf_z = None, None
# compute radius
radius = get_radius(voxel_size_z, voxel_size_yx, psf_z, psf_yx)
# build reference spot
if image.ndim == 3:
reference_spot = _build_reference_spot_3d(image, spots, radius, alpha)
else:
reference_spot = _build_reference_spot_2d(image, spots, radius, alpha)
return reference_spot
def _build_reference_spot_3d(image, spots, radius, alpha):
"""Build a median or mean spot in 3 dimensions as reference.
Reference spot is computed from a sample of uncropped detected spots. If
such sample is not possible, an empty frame is returned.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x).
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 3) for 3-d images.
radius : Tuple[float]
Radius in pixels of the detected spots, one element per dimension.
alpha : int or float
Intensity score of the reference spot, between 0 and 1. If 0, reference
spot approximates the spot with the lowest intensity. If 1, reference
spot approximates the brightest spot.
Returns
-------
reference_spot : np.ndarray
Reference spot in 3-d.
"""
# get a rounded radius for each dimension
radius_z = np.ceil(radius[0]).astype(np.int64)
z_shape = radius_z * 2 + 1
radius_yx = np.ceil(radius[-1]).astype(np.int64)
yx_shape = radius_yx * 2 + 1
# randomly choose some spots to aggregate
indices = [i for i in range(spots.shape[0])]
np.random.shuffle(indices)
indices = indices[:min(2000, spots.shape[0])]
candidate_spots = spots[indices, :]
# collect area around each spot
l_reference_spot = []
for i_spot in range(candidate_spots.shape[0]):
# get spot coordinates
spot_z, spot_y, spot_x = candidate_spots[i_spot, :]
# get the volume of the spot
image_spot = _get_spot_volume(image, spot_z, spot_y, spot_x,
radius_z, radius_yx)
# keep images that are not cropped by the borders
if image_spot.shape == (z_shape, yx_shape, yx_shape):
l_reference_spot.append(image_spot)
# if not enough spots are detected
if len(l_reference_spot) <= 30:
warnings.warn("Problem occurs during the computation of a reference "
"spot. Not enough (uncropped) spots have been detected.",
UserWarning)
if len(l_reference_spot) == 0:
reference_spot = np.zeros((z_shape, yx_shape, yx_shape),
dtype=image.dtype)
return reference_spot
# project the different spot images
l_reference_spot = np.stack(l_reference_spot, axis=0)
alpha_ = alpha * 100
reference_spot = np.percentile(l_reference_spot, alpha_, axis=0)
reference_spot = reference_spot.astype(image.dtype)
return reference_spot
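# Note on the percentile projection above: `alpha` acts as an intensity score
# because np.percentile(stack, alpha * 100, axis=0) reduces the stack of
# candidate crops per pixel -- alpha = 0.5 yields the median spot, alpha = 1
# the per-pixel maximum, alpha = 0 the per-pixel minimum.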
def _get_spot_volume(image, spot_z, spot_y, spot_x, radius_z, radius_yx):
"""Get a subimage of a detected spot in 3 dimensions.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x).
spot_z : np.int64
Coordinate of the detected spot along the z axis.
spot_y : np.int64
Coordinate of the detected spot along the y axis.
spot_x : np.int64
Coordinate of the detected spot along the x axis.
radius_z : int
Radius in pixels of the detected spot, along the z axis.
radius_yx : int
Radius in pixels of the detected spot, on the yx plan.
Returns
-------
image_spot : np.ndarray
Reference spot in 3-d.
"""
# get boundaries of the volume surrounding the spot
z_spot_min = max(0, int(spot_z - radius_z))
z_spot_max = min(image.shape[0], int(spot_z + radius_z))
y_spot_min = max(0, int(spot_y - radius_yx))
y_spot_max = min(image.shape[1], int(spot_y + radius_yx))
x_spot_min = max(0, int(spot_x - radius_yx))
x_spot_max = min(image.shape[2], int(spot_x + radius_yx))
# get the volume of the spot
image_spot = image[z_spot_min:z_spot_max + 1,
y_spot_min:y_spot_max + 1,
x_spot_min:x_spot_max + 1]
return image_spot
def _build_reference_spot_2d(image, spots, radius, alpha):
"""Build a median or mean spot in 2 dimensions as reference.
Reference spot is computed from a sample of uncropped detected spots. If
such sample is not possible, an empty frame is returned.
Parameters
----------
image : np.ndarray
Image with shape (y, x).
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 2) for 2-d images.
radius : Tuple[float]
Radius in pixels of the detected spots, one element per dimension.
alpha : int or float
Intensity score of the reference spot, between 0 and 1. If 0, reference
spot approximates the spot with the lowest intensity. If 1, reference
spot approximates the brightest spot.
Returns
-------
reference_spot : np.ndarray
Reference spot in 2-d.
"""
# get a rounded radius for each dimension
radius_yx = np.ceil(radius[-1]).astype(np.int64)
yx_shape = radius_yx * 2 + 1
# randomly choose some spots to aggregate
indices = [i for i in range(spots.shape[0])]
np.random.shuffle(indices)
indices = indices[:min(2000, spots.shape[0])]
candidate_spots = spots[indices, :]
# collect area around each spot
l_reference_spot = []
for i_spot in range(candidate_spots.shape[0]):
# get spot coordinates
spot_y, spot_x = candidate_spots[i_spot, :]
# get the volume of the spot
image_spot = _get_spot_surface(image, spot_y, spot_x, radius_yx)
# keep images that are not cropped by the borders
if image_spot.shape == (yx_shape, yx_shape):
l_reference_spot.append(image_spot)
# if not enough spots are detected
if len(l_reference_spot) <= 30:
warnings.warn("Problem occurs during the computation of a reference "
"spot. Not enough (uncropped) spots have been detected.",
UserWarning)
if len(l_reference_spot) == 0:
reference_spot = np.zeros((yx_shape, yx_shape), dtype=image.dtype)
return reference_spot
# project the different spot images
l_reference_spot = np.stack(l_reference_spot, axis=0)
alpha_ = alpha * 100
reference_spot = np.percentile(l_reference_spot, alpha_, axis=0)
reference_spot = reference_spot.astype(image.dtype)
return reference_spot
def _get_spot_surface(image, spot_y, spot_x, radius_yx):
"""Get a subimage of a detected spot in 2 dimensions.
Parameters
----------
image : np.ndarray
Image with shape (y, x).
spot_y : np.int64
Coordinate of the detected spot along the y axis.
spot_x : np.int64
Coordinate of the detected spot along the x axis.
radius_yx : int
Radius in pixels of the detected spot, on the yx plan.
Returns
-------
image_spot : np.ndarray
Reference spot in 2-d.
"""
# get boundaries of the surface surrounding the spot
y_spot_min = max(0, int(spot_y - radius_yx))
y_spot_max = min(image.shape[0], int(spot_y + radius_yx))
x_spot_min = max(0, int(spot_x - radius_yx))
x_spot_max = min(image.shape[1], int(spot_x + radius_yx))
# get the surface of the spot
image_spot = image[y_spot_min:y_spot_max + 1,
x_spot_min:x_spot_max + 1]
return image_spot
# ### Spot modelization ###
def modelize_spot(reference_spot, voxel_size_z=None, voxel_size_yx=100,
psf_z=None, psf_yx=200):
"""Fit a gaussian function on the reference spot.
Parameters
----------
reference_spot : np.ndarray
A 3-d or 2-d image with detected spot and shape (z, y, x) or (y, x).
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, reference
spot is considered in 2-d.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
psf_z : int or float or None
Theoretical size of the PSF emitted by a spot in the z plan,
in nanometer. If None, reference spot is considered in 2-d.
psf_yx : int or float
Theoretical size of the PSF emitted by a spot in the yx plan,
in nanometer.
Returns
-------
parameters_fitted : Tuple[float]
- sigma_z : float
Standard deviation of the spot along the z axis, in pixel.
Available only for a 3-d modelization.
- sigma_yx : float
Standard deviation of the spot along the yx axis, in pixel.
- amplitude : float
Amplitude of the spot.
- background : float
Background minimum value of the image.
"""
# check parameters
stack.check_array(reference_spot,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_parameter(voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
psf_z=(int, float, type(None)),
psf_yx=(int, float))
# check number of dimensions
ndim = reference_spot.ndim
if ndim == 3 and voxel_size_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'voxel_size_z' parameter is missing.".format(ndim))
if ndim == 3 and psf_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'psf_z' parameter is missing.".format(ndim))
if ndim == 2:
voxel_size_z, psf_z = None, None
# initialize a grid representing the reference spot
grid, centroid_coord = _initialize_grid(
image_spot=reference_spot,
voxel_size_z=voxel_size_z,
voxel_size_yx=voxel_size_yx,
return_centroid=True)
# compute amplitude and background of the reference spot
amplitude, background = _initialize_background_amplitude(reference_spot)
# initialize parameters of the gaussian function
f = _objective_function(
nb_dimension=ndim,
voxel_size_z=voxel_size_z,
voxel_size_yx=voxel_size_yx,
psf_z=None,
psf_yx=None,
psf_amplitude=None)
if ndim == 3:
# parameters to fit: mu_z, mu_y, mu_x, sigma_z, sigma_yx, amplitude
# and background
centroid_z, centroid_y, centroid_x = centroid_coord
p0 = [centroid_z, centroid_y, centroid_x, psf_z, psf_yx, amplitude,
background]
l_bound = [-np.inf, -np.inf, -np.inf, -np.inf, -np.inf, -np.inf, 0]
u_bound = [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf]
else:
# parameters to fit: mu_y, mu_x, sigma_yx, amplitude and background
centroid_y, centroid_x = centroid_coord
p0 = [centroid_y, centroid_x, psf_yx, amplitude, background]
l_bound = [-np.inf, -np.inf, -np.inf, -np.inf, 0]
u_bound = [np.inf, np.inf, np.inf, np.inf, np.inf]
# fit a gaussian function on this reference spot
popt, pcov = _fit_gaussian(f, grid, reference_spot, p0,
lower_bound=l_bound,
upper_bound=u_bound)
# get optimized parameters to modelize the reference spot as a gaussian
if ndim == 3:
sigma_z = popt[3]
sigma_yx = popt[4]
amplitude = popt[5]
background = popt[6]
return sigma_z, sigma_yx, amplitude, background
else:
sigma_yx = popt[2]
amplitude = popt[3]
background = popt[4]
return sigma_yx, amplitude, background
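# For reference, the returned parameters describe a separable Gaussian plus a
# constant background. Ignoring the per-pixel erf integration suggested by the
# `erf` import, the fitted 3-d model has the form
#     f(z, y, x) = B + A * exp(-(z - mu_z)^2 / (2 sigma_z^2)
#                              - ((y - mu_y)^2 + (x - mu_x)^2) / (2 sigma_yx^2))
# with amplitude A and background B.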
# ### Spot modelization: initialization ###
def _initialize_grid(image_spot, voxel_size_z, voxel_size_yx,
return_centroid=False):
"""Build a grid in nanometer to compute gaussian function values over a
full volume or surface.
Parameters
----------
image_spot : np.ndarray
An image with detected spot and shape (z, y, x) or (y, x).
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, image spot
is considered in 2-d.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
return_centroid : bool
Compute centroid estimation of the grid.
Returns
-------
grid : np.ndarray, np.float32
A grid with the shape (3, z * y * x) or (2, y * x), in nanometer.
centroid_coord : Tuple[float]
Estimated centroid of the spot, in nanometer. One element per
dimension.
"""
# initialize grid in 2-d...
if image_spot.ndim == 2:
if return_centroid:
grid, centroid_y, centroid_x = _initialize_grid_2d(
image_spot, voxel_size_yx, return_centroid)
return grid, (centroid_y, centroid_x)
else:
grid = _initialize_grid_2d(
image_spot, voxel_size_yx, return_centroid)
return grid
# ... or 3-d
else:
if return_centroid:
grid, centroid_z, centroid_y, centroid_x = _initialize_grid_3d(
image_spot, voxel_size_z, voxel_size_yx, return_centroid)
return grid, (centroid_z, centroid_y, centroid_x)
else:
grid = _initialize_grid_3d(
image_spot, voxel_size_z, voxel_size_yx, return_centroid)
return grid
def _initialize_grid_3d(image_spot, voxel_size_z, voxel_size_yx,
return_centroid=False):
"""Build a grid in nanometer to compute gaussian function values over a
full volume.
Parameters
----------
image_spot : np.ndarray
A 3-d image with detected spot and shape (z, y, x).
voxel_size_z : int or float
Height of a voxel, along the z axis, in nanometer.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
return_centroid : bool
Compute centroid estimation of the grid.
Returns
-------
grid : np.ndarray, np.float32
A grid with the shape (3, z * y * x), in nanometer.
centroid_z : float
Estimated centroid of the spot, in nanometer, along the z axis.
centroid_y : float
Estimated centroid of the spot, in nanometer, along the y axis.
centroid_x : float
Estimated centroid of the spot, in nanometer, along the x axis.
"""
# get targeted size
nb_z, nb_y, nb_x = image_spot.shape
nb_pixels = image_spot.size
# build meshgrid
zz, yy, xx = np.meshgrid(np.arange(nb_z), np.arange(nb_y), np.arange(nb_x),
indexing="ij")
    # cast before scaling: in-place `*=` on the integer meshgrid raises a
    # casting error when the voxel sizes are floats
    zz = zz * float(voxel_size_z)
    yy = yy * float(voxel_size_yx)
    xx = xx * float(voxel_size_yx)
# format result
grid = np.zeros((3, nb_pixels), dtype=np.float32)
grid[0] = np.reshape(zz, (1, nb_pixels)).astype(np.float32)
grid[1] = np.reshape(yy, (1, nb_pixels)).astype(np.float32)
grid[2] = np.reshape(xx, (1, nb_pixels)).astype(np.float32)
# compute centroid of the grid
if return_centroid:
area = np.sum(image_spot)
dz = image_spot * zz
dy = image_spot * yy
dx = image_spot * xx
centroid_z = np.sum(dz) / area
centroid_y = np.sum(dy) / area
centroid_x = np.sum(dx) / area
return grid, centroid_z, centroid_y, centroid_x
else:
return grid
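# The centroid block above is an intensity-weighted mean over the scaled grid:
#     c_z = sum_{z,y,x} I(z, y, x) * z / sum_{z,y,x} I(z, y, x)
# (and similarly for c_y and c_x), with coordinates already in nanometers.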
def _initialize_grid_2d(image_spot, voxel_size_yx, return_centroid=False):
"""Build a grid in nanometer to compute gaussian function values over a
full surface.
Parameters
----------
image_spot : np.ndarray
A 2-d image with detected spot and shape (y, x).
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
return_centroid : bool
Compute centroid estimation of the grid.
Returns
-------
grid : np.ndarray, np.float32
A grid with the shape (2, y * x), in nanometer.
centroid_y : float
Estimated centroid of the spot, in nanometer, along the y axis.
centroid_x : float
Estimated centroid of the spot, in nanometer, along the x axis.
"""
# get targeted size
nb_y, nb_x = image_spot.shape
nb_pixels = image_spot.size
# build meshgrid
yy, xx = np.meshgrid(np.arange(nb_y), np.arange(nb_x), indexing="ij")
    # cast before scaling (see the 3-d helper): avoids the int *= float casting error
    yy = yy * float(voxel_size_yx)
    xx = xx * float(voxel_size_yx)
# format result
grid = np.zeros((2, nb_pixels), dtype=np.float32)
grid[0] = np.reshape(yy, (1, nb_pixels)).astype(np.float32)
grid[1] = | np.reshape(xx, (1, nb_pixels)) | numpy.reshape |
import json
import numpy as np
import matplotlib.pyplot as plt
def to_seconds(s):
hr, min, sec = [float(x) for x in s.split(':')]
return hr*3600 + min*60 + sec
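# Example (illustrative value): to_seconds('01:02:03.5')
# -> 1*3600 + 2*60 + 3.5 = 3723.5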
def extract(gst_log, script_log, debug=False):
with open(gst_log, "r") as f:
lines = f.readlines()
id_s = "create:<v4l2src"
st_s = "sync to "
ts_s = "out ts "
lf_s = "lost frames detected: count = "
st_l, ts_l, lf_l = [[], []], [[], []], [[], []]
for line in lines:
id_p = line.find(id_s)
st_p = line.find(st_s)
ts_p = line.find(ts_s)
lf_p = line.find(lf_s)
if id_p > 0:
id_p += len(id_s)
id = int(line[id_p:id_p+1])
if st_p > 0 and ts_p > 0:
st_p += len(st_s)
ts_p += len(ts_s)
st = to_seconds(line[st_p:st_p+17])
ts = to_seconds(line[ts_p:ts_p+17])
if debug:
print(id, st, ts)
st_l[id].append(st)
ts_l[id].append(ts)
if lf_p > 0:
lf_p += len(lf_s)
line = line[lf_p:]
lf = int(line.split()[0])
p = line.find("ts: ")
t = to_seconds(line[p+4:])
if debug:
print("lf", id, lf, t)
lf_l[id].append((lf, t))
st, ts, lf = st_l, ts_l, lf_l
# print(st)
# print(ts)
with open(script_log, "r") as f:
lines = f.readlines()
new_s = "Samples_"
pull_s = "Pulled_"
over_s = "Overrun_"
n, p, o = [[], []], [[], []], [[], []]
for line in lines:
new_p = line.find(new_s)
pull_p = line.find(pull_s)
over_p = line.find(over_s)
if new_p >= 0:
new_p += len(new_s)
id = int(line[new_p])
n[id].append([float(x) for x in line[new_p + 2:].split()])
if pull_p >= 0:
pull_p += len(pull_s)
id = int(line[pull_p])
p[id].append([float(x) for x in line[pull_p + 2:].split() if x != "at"])
if over_p >= 0:
over_p += len(over_s)
id = int(line[over_p])
o[id].append([float(x) for x in line[over_p + 2:].split()])
# print(n)
# print(p)
if debug:
print(o)
with open(script_log + ".json", "w") as f:
d = {"st" : st, "ts" : ts, "lf" : lf, "n" : n, "p" : p, "o" : o}
json.dump(d, f, indent=4)
def load(json_filename):
with open(json_filename, "r") as f:
return json.load(f)
# return d["st"], d["ts"], d["lf"], d["n"], d["p"], d["o"]
def plot(d, ids):
st, ts, lf, n, p, o = d["st"], d["ts"], d["lf"], d["n"], d["p"], d["o"]
plt.figure("v4l2src")
for id in range(ids):
lf[id] = | np.array(lf[id]) | numpy.array |
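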
import unittest
from yauber_algo.errors import *
class CategorizeTestCase(unittest.TestCase):
def test_categorize(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import categorize
#
# Function settings
#
algo = 'categorize'
func = categorize
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]),
func,
(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
[0, 3, 6, 10]
),
suffix='reg'
)
s.check_regular(
np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]),
func,
(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
[0.1, 3, 6, 10]
),
suffix='min_not_in_bins',
exception=YaUberAlgoInternalError
)
s.check_regular(
np.array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.]),
func,
(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
[0, 3, 6, 9.999]
),
suffix='max_not_in_bins',
exception=YaUberAlgoInternalError
)
s.check_regular(
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
func,
(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
[0, 10]
),
suffix='min_max_one_bin',
exception=YaUberAlgoArgumentError
)
s.check_regular(
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
func,
(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
[0, 10, 10]
),
suffix='bins_non_unique',
exception=YaUberAlgoArgumentError
)
s.check_regular(
np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
func,
(
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]),
[0, 10, 5]
),
suffix='bins_not_sorted',
exception=YaUberAlgoArgumentError
)
s.check_regular(
| np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) | numpy.array |
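Taken together, the checks above pin down the contract the tests expect from
categorize: K sorted, unique bin edges define K-1 right-closed buckets, and the
data min/max must coincide with the outer edges. A minimal numpy sketch that
reproduces the expected output of the first regular check (an illustration of
the contract, not the library's implementation):

import numpy as np

def categorize_sketch(arr, bins):
    edges = np.asarray(bins, dtype=float)
    # interior edges only; right=True makes each bucket closed on the right
    return np.digitize(arr, edges[1:-1], right=True).astype(float)

# categorize_sketch(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 10]), [0, 3, 6, 10])
# -> array([0., 0., 0., 0., 1., 1., 1., 2., 2., 2.])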
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to find
    the matching NetCDF files in the THREDDS catalog.
    :param data: JSON object returned from the M2M data request with details on
        where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only
        collect the correct ones
    :return: list of file paths in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
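# The auto-growing index above is what lets M2M_URLs below write
# `var_list[k].name = ...` without pre-allocating entries: indexing one slot
# past the end appends a fresh var() first. Illustration:
#     vl = structtype()
#     vl[0].name = 'time'      # auto-creates entry 0
#     vl[1].name = 'pressure'  # auto-creates entry 1
#     len(vl)                  # -> 2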
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
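# All FLORT branches share the same six optical variables (scattering,
# chlorophyll-a, CDOM, volume scattering, backscatter); the profiler branch
# above additionally carries 'int_ctd_pressure' so samples can be placed in
# the vertical.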
#FDCHP -- direct covariance flux package (only the time base is mapped here)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA -- dissolved oxygen optode
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
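# On the profiler the oxygen sensor is a DOFST (SBE 43F) rather than an
# optode: 'dofst_k_oxygen' is the raw sensor output, a frequency in Hz, and
# 'dofst_k_oxygen_l2' is the derived concentration in umol/kg.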
#ADCP -- acoustic Doppler current profiler (earth-referenced velocities)
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
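# A hedged post-processing sketch (not part of this script): once the ADCP
# arrays are populated, current speed and direction-toward can be derived
# from the earth-referenced components (assumes numpy as np):
#
#     u = var_list[5].data  # eastward, m/s
#     v = var_list[6].data  # northward, m/s
#     speed = np.sqrt(u**2 + v**2)                      # m/s
#     direction = np.degrees(np.arctan2(u, v)) % 360.0  # deg clockwise from north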
#ZPLSC -- bio-acoustic sonar (only the time base is mapped here)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS_Stats -- bulk surface-wave statistics
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
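# 'wave_height_hmo' is the spectral estimate of significant wave height
# (Hm0 = 4 * sqrt(m0), where m0 is the zeroth moment of the wave spectrum),
# so it should track the zero-crossing 'significant_wave_height' closely.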
#VELPT -- single-point velocity meter (raw integer-scaled fields)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
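# The VELPT branches expose the instrument's integer-scaled raw fields. A
# hedged conversion sketch to conventional units (not part of this script):
#
#     heading_deg = var_list[4].data / 10.0    # deci-degrees -> degrees
#     temp_degC   = var_list[7].data / 100.0   # 0.01 degC    -> degC
#     press_dbar  = var_list[8].data / 1000.0  # 0.001 dbar   -> dbar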
#PCO2W -- seawater pCO2
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN -- seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
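# 'phsen_abcdef_ph_seawater' is a dimensionless pH value (reported on the
# total hydrogen-ion scale for this sensor class), hence the 'unitless' tag.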
#SPKIR -- multispectral downwelling irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
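# The SPKIR 'spkir_abj_cspp_downwelling_vector' is a per-sample vector of
# spectral downwelling irradiance (one value per instrument wavelength
# channel), in uW cm-2 nm-1, rather than a scalar time series.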
#PRESF -- seafloor pressure (tide) recorder
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
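# A hedged sketch for interpreting PRESF tide records (not part of this
# script): 'abs_seafloor_pressure' is absolute, so it includes roughly
# 10.13 dbar of atmosphere, and depth follows from P = rho*g*h:
#
#     water_dbar = var_list[1].data - 10.13           # strip nominal atmosphere
#     depth_m = water_dbar * 1.0e4 / (1025.0 * 9.81)  # 1 dbar = 1e4 Pa; rho ~1025 kg/m3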
#CTDBP -- pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
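# VEL3D: single-point 3-D velocity meter on the seafloor (MFN) packages,
# the VEL3D-C/D series (a Nortek Vector, per OOI instrument listings).
# All four branches parse the same east/north/up turbulent-velocity triplet
# plus seawater pressure.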
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
#VEL3DK
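# VEL3DK: 3-D velocity meter on the CE09OSPM wire-following profiler (the
# -K series; a Nortek Aquadopp II per OOI instrument listings). Pressure
# here comes from the co-located profiler CTD ('int_ctd_pressure').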
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
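# PCO2A: air-sea pCO2 package on the surface buoys. Each branch parses the
# seawater and atmospheric CO2 partial pressures (uatm) plus the derived
# air-sea flux product; only the platform prefix differs between branches.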
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
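# PARAD: photosynthetically active radiation on the profiler. The double
# underscore in 'parad_k__stc_imodem_instrument' matches the stream name as
# published in uFrame, so it should not be "corrected" here.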
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
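# OPTAA: spectrophotometer (absorption/attenuation). Only the timestamp is
# requested in these branches; the multi-wavelength spectral channels are
# not mapped into this flat var_list scheme.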
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
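# NUTNR: SUNA nitrate sensor. The '..._recovered' suffix in
# 'suna_dcl_recovered' is part of the published stream name even for the
# telemetered delivery method, so it is not a typo in this table.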
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
# Recovered-host (method == 'RecoveredHost') streams follow.
#MOPAK
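# MOPAK: buoy 3-axis motion package. As with OPTAA, only the timestamp is
# parsed; the raw acceleration channels are not mapped here.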
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
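# METBK: bulk meteorology package on the surface buoys (ASIMET-class sensor
# suite). The 19 slots cover surface ocean state, corrected wind components,
# radiation, precipitation, and the minute-averaged METBK flux products.
# A hedged usage sketch (names assumed from this table) once data are loaded:
#   wind_speed = np.sqrt(var_list[4].data**2 + var_list[5].data**2)  # m/s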
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
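# FLORT: three-channel fluorometer/scattering sensor (ECO Triplet-class).
# Every branch parses the same five optical products: chlorophyll in ug/L,
# CDOM in ppb, and scattering/backscatter terms in m-1 (m-1 sr-1 for the
# volume scattering coefficient).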
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
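# FDCHP: direct-covariance air-sea flux package (CE02SHSM buoy only).
# Only the timestamp is parsed here.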
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
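# DOSTA: optode dissolved oxygen. The NSIF branches parse four oxygen
# products; the MFN branches expose only the corrected 'dissolved_oxygen'
# (umol/kg) plus the raw optode concentration ('dosta_ln_optode_oxygen').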
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
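# ADCP: earth-coordinate velocity profiles. 'bin_depths' is a per-bin
# vector, so expect 2-D (time x bin) arrays once populated, even though the
# slots are initialized flat here. A hedged sketch, assuming that layout:
#   u_bar = np.nanmean(var_list[5].data, axis=1)  # depth-averaged east vel.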
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
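# WAVSS_Stats: bulk wave statistics from the buoy wave sensor. The 15 slots
# are the standard zero-crossing and spectral wave parameters: heights in m,
# periods in sec, directions in degrees.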
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
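#Single-point velocity meter (likely a Nortek Aquadopp; the model is inferred
#from the velpt_ab stream names, not stated in this table). Velocities are in
#m/s; heading/roll/pitch are raw deci-degrees, temperature is in centidegrees
#(0.01degC), and pressure is in instrument counts of 0.001dbar, matching the
#units fields below.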
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
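#Note: this BUOY case points at the RID16 (NSIF) reference designator and the
#non-DCL instrument-recovered stream; the DCL-parsed stream is left commented
#out below.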
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
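#Partial pressure of CO2 in seawater (uatm) plus the sensor's thermistor
#temperature; the same two data variables are requested for every NSIF and
#MFN entry below.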
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
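#Seawater pH sensor: thermistor temperature (degC) and pH, which is recorded
#as 'unitless' because pH is dimensionless.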
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
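#Spectral irradiance: the single data variable is a downwelling irradiance
#vector (one value per wavelength channel) in uW cm-2 nm-1.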
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
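#Seafloor pressure (tide) gauge on the MFN frames: absolute bottom pressure in
#dbar plus seawater temperature. The sensor series varies by site (PRESFA,
#PRESFB, PRESFC in the reference designators below).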
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
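#Pumped CTD: temperature, practical salinity (dimensionless, hence 'unitless'),
#density, pressure, and conductivity. As a hedged sketch, assuming this table
#lives inside a data-request routine keyed on these four arguments, a
#hypothetical call would look like:
#    vars = lookup('CE01ISSM', 'NSIF', 'CTD', 'RecoveredHost')  #hypothetical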
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
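#3-D single-point turbulence velocimeter on the MFN frames: east/north/up
#turbulent velocity components (m/s) and pressure in instrument counts of
#0.001dbar.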
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
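#Air-sea pCO2 system on the surface buoys: surface-seawater and atmospheric
#pCO2 (uatm) plus the derived air-sea CO2 flux (mol m-2 s-1).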
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
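#Absorption/attenuation spectrophotometer. Only the time base is listed for
#OPTAA in this table; no spectral data variables are requested here.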
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
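#Nitrate from the SUNA sensor (per the suna_dcl_recovered stream names): raw
#nitrate concentration and salinity-corrected nitrate, both in umol/L.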
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
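#CTDBP - RecoveredInst
#Instrument-recovered CTD streams (data read back from the instrument itself
#rather than from the DCL host logs). The variable set matches the
#host-recovered CTD blocks above, but the parameter names carry the
#ctdbp_seawater_ prefix; method == 'RecoveredInst' selects these blocks.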
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
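# CTD on the CE09OSPM wire-following profiler (recovered WFP stream); note the
# ctdpf_ckl parameter names instead of the moored ctdbp names used above.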
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
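# Recovered-instrument ADCP datasets: earth-referenced velocity components plus
# bin depths and instrument attitude (heading/pitch/roll).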
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
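# Recovered-instrument ZPLSC bioacoustic sonar (echogram) datasets; only the
# time coordinate is populated here.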
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
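# Recovered-instrument VELPT single-point velocity meters (buoy and NSIF nodes):
# velocity components plus attitude, temperature, and pressure.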
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
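# VEL3D velocity datasets: first the VEL3D-K on the CE09OSPM profiler
# (recovered WFP), then the benthic VEL3D-CD turbulent-velocity instruments
# on the MFN nodes (recovered instrument).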
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
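# Recovered-instrument PRESF seafloor pressure (tide) measurements on the MFN nodes.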
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
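# Recovered-instrument PHSEN seawater-pH datasets (NSIF nodes first, then MFN nodes).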
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
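# Recovered-instrument PCO2W seawater pCO2 datasets.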
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
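# Recovered wire-following-profiler PARAD (photosynthetically available radiation).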
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
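# Recovered-instrument NUTNR (SUNA) nitrate datasets: raw and
# salinity-corrected nitrate concentrations.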
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
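# Recovered-instrument FDCHP (direct-covariance flux package) on the CE02SHSM
# buoy; only the time coordinate is populated here.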
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
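# Recovered FLORT fluorometer datasets: scattering coefficients, chlorophyll-a,
# CDOM, and optical backscatter (the profiler entry adds CTD pressure).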
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
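# Dissolved-oxygen datasets: the DOFST-K on the CE09OSPM profiler (raw output
# in Hz), then moored DOSTA entries recorded through the co-located CTDBP.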
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
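# Wave statistics (WAVSS_Stats) served from the MFN ADCPT-M log9 recovered streams.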
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
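# Streamed (cabled) benthic experiment package datasets on CE02SHBP and CE04OSBP.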
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
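#Illustrative note (not part of the original dispatch): each var_list entry is
#a simple container with .name, .data, and .units attributes, initialized with
#empty arrays here. Downstream request code is assumed to append the matching
#netCDF variable into .data once the UFrame request resolves, e.g.
#(hypothetical sketch only; nc_dataset is an assumed netCDF4.Dataset handle):
#   for v in var_list:
#       v.data = np.append(v.data, nc_dataset.variables[v.name][:])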
#CSPP (Coastal Surface-Piercing Profiler) Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
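#Note: NUTNR on the inshore profilers (06-NUTNRJ000) is only defined for the
#RecoveredCSPP method in this dispatch; no telemetered branch is provided.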
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
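#CE02SHSP/CE07SHSP Shelf Surface-Piercing Profiler Data below (RecoveredCSPP only)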
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
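#CE05MOAS Glider Data below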
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
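#Glider streams additionally carry GPS position (lat/lon with CF-style
#degree_north/degree_east units) so profiles can be georeferenced; the same
#eight-variable layout repeats for each glider and method below.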
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
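# --- Dissolved oxygen (DOSTA) branches for the CE05MOAS gliders ---
# Each glider pairs a telemetered stream (dosta_abcdjm_glider_instrument)
# with a recovered-host stream (dosta_abcdjm_glider_recovered); both carry
# oxygen concentration (umol/L and umol/kg), interpolated CTD pressure,
# and position.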
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
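# --- Fluorometer/backscatter (FLORT) branches for the CE05MOAS gliders ---
# Both the telemetered and recovered-host methods resolve to the
# flort_m_sample stream, which reports the seawater scattering coefficient,
# chlorophyll, CDOM, and optical backscatter alongside interpolated CTD
# pressure and position.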
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
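# --- Photosynthetically active radiation (PARAD) branches ---
# Telemetered data come from parad_m_glider_instrument and recovered-host
# data from parad_m_glider_recovered; PAR is reported in
# umol photons m-2 s-1 with interpolated CTD pressure and position.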
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
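# Note on the pattern above: every CE05MOAS glider PARAD stream shares the
# same 5-variable schema (time, parad_m_par, int_ctd_pressure, lat, lon), so
# the branches differ only in glider id and collection method. A hedged
# sketch of a table-driven alternative (names below are illustrative, not
# part of the original script):
#   PARAD_METHODS = {'Telemetered': 'telemetered/parad_m_glider_instrument',
#                    'RecoveredHost': 'recovered_host/parad_m_glider_recovered'}
#   glider = platform_name[2:]  # e.g. 'CEGL384' -> 'GL384'
#   uframe_dataset_name = 'CE05MOAS/%s/01-PARADM000/%s' % (glider, PARAD_METHODS[method])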
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
import pygame as pg
import numpy as np
from numba import njit
def main():
pg.init()
pg.display.set_caption("Dead and - A Python game by FinFET, thanks for playing!")
font = pg.font.SysFont("Courier New", 70)
sounds = load_sounds()
m_vol, sfx_vol, music = 0.4, 0.5, 0
set_volume(m_vol, sfx_vol, sounds)
sounds['music'+str(music)].play(-1)
stepdelay = pg.time.get_ticks()/200
stepdelay2 = stepdelay
click, clickdelay = 0, stepdelay
screen = pg.display.set_mode((800,600))
running, pause, options, newgame = 1, 1, 0, 2
clock = pg.time.Clock()
pg.mouse.set_visible(False)
timer = 0
hres, halfvres, mod, frame = adjust_resolution()
fullscreen = 0
level, player_health, swordsp, story = 0, 0, 0, 0
#sky1, floor, wall, door, window, enemies
level_textures = [[0, 1, 0, 0, 1, 4], #level 0
[0, 2, 1, 1, 0, 3], #level 1
[1, 0, 2, 1, 1, 4], #level 2
[1, 3, 1, 0, 0, 1], #level 3
[2, 1, 2, 1, 1, 0], #level 4
[2, 0, 0, 0, 0, 2]] #level 5
menu = [pg.image.load('Assets/Textures/menu0.png').convert_alpha()]
menu.append(pg.image.load('Assets/Textures/options.png').convert_alpha())
menu.append(pg.image.load('Assets/Textures/credits.png').convert_alpha())
menu.append(pg.image.load('Assets/Textures/menu1.png').convert_alpha())
hearts = pg.image.load('Assets/Textures/hearts.png').convert_alpha()
colonel = pg.image.load('Assets/Sprites/colonel1.png').convert_alpha()
hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
exit1 = pg.image.load('Assets/Textures/exit.png').convert_alpha()
exit2 = 1
exits = [pg.Surface.subsurface(exit1,(0,0,50,50)), pg.Surface.subsurface(exit1,(50,0,50,50))]
splash = []
for i in range(4):
splash.append(pg.image.load('Assets/Textures/splash'+str(i)+'.jpg').convert())
blood = pg.image.load('Assets/Textures/blood0.png').convert_alpha()
blood_size = np.asarray(blood.get_size())
sky1 = hearts.copy() # initialize with a placeholder so the resolution can be adjusted on start
msg = "Press any key..."
surf = splash[0].copy()
splash_screen(msg, splash[0], clock, font, screen)
msg = " "
while running:
pg.display.update()
ticks = pg.time.get_ticks()/200
er = min(clock.tick()/500, 0.3)
if not pause and (player_health <= 0 or (exit2 == 0 and int(posx) == exitx and int(posy) == exity)):
msg = ' '
if player_health <= 0:
sounds['died'].play()
newgame = 2
surf = splash[3].copy()
else:
level += 1
player_health = min(player_health+2, 20)
sounds['won'].play()
newgame = 1
if level > 5:
level, newgame = 0, 2
sounds['died'].play()
surf = splash[2].copy()
surf.blit(font.render('Total time: ' + str(round(timer,1)), 1, (255, 255, 255)), (20, 525))
else:
msg = "Cleared level " + str(level-1)+'!'
splash_screen(msg, surf, clock, font, screen)
pause, clickdelay = 1, ticks
pg.time.wait(500)
if pg.mouse.get_pressed()[0]:
if swordsp < 1 and not pause:
swordsp, damage_mod = 1, 1
if pause and ticks - clickdelay > 1:
click, clickdelay = 1, ticks
sounds['healthup'].play()
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
if event.type == pg.KEYDOWN:
if event.key == ord('p') or event.key == pg.K_ESCAPE:
if not pause:
pause = 1
else:
if options > 0:
options = 0
elif newgame == 0:
pause = 0
pg.mouse.set_pos(400,300)
if event.key == ord('f'): # toggle fullscreen
pg.display.toggle_fullscreen()
fullscreen = not(fullscreen)
if pause:
clock.tick(60)
surf2, pause, options, running, newgame, adjust_res, m_vol, sfx_vol, story = pause_menu(
surf.copy(), menu, pause, options, click, running, m_vol, sfx_vol, sounds, newgame, font, msg, level, ticks, hres, story)
if adjust_res != 1:
hres, halfvres, mod, frame = adjust_resolution(int(hres*adjust_res))
sky = pg.surfarray.array3d(pg.transform.smoothscale(sky1, (720, halfvres*4)))
adjust_res = 1
screen.blit(surf2, (0,0))
click = 0
if newgame == 1:
newgame, pause = 0, not(pause)
if player_health <= 0 or msg[0] != 'C':
surf = splash[1].copy()
splash_screen(' ', surf, clock, font, screen)
level, player_health, timer = 0, 20, -0.1
if np.random.randint(0, 2) != music:
sounds['music'+str(music)].fadeout(1000)
music = int(not(music))
sounds['music'+str(music)].play(-1)
msg = 'Loading...'
surf2 = surf.copy()
surf2.blit(font.render(msg, 1, (255, 255, 255)), (30, 500))
surf2.blit(font.render(msg, 1, (30, 255, 155)), (32, 502))
screen.blit(surf2, (0,0))
pg.display.update()
msg = 'Kill the monsters!'
if story:
posx, posy, rot, rotv, maph, mapc, exitx, exity, stepscount, size = load_map(level)
nlevel = level_textures[level]
else:
size = np.random.randint(10+level*2, 16+level*2)
nenemies = size #number of enemies
posx, posy, rot, rotv, maph, mapc, exitx, exity, stepscount = gen_map(size)
nlevel = [np.random.randint(0,3), #sky1
np.random.randint(0,4), #floorwall
np.random.randint(0,3), #wall
np.random.randint(0,2), #door
np.random.randint(0,2), #window
np.random.randint(0,5), #enemies
]
nenemies = level**2 + 10 + level #number of enemies
sprites, spsize, sword, swordsp = get_sprites(nlevel[5])
sky1, floor, wall, bwall, door, window = load_textures(nlevel)
sky = pg.surfarray.array3d(pg.transform.smoothscale(sky1, (720, halfvres*4)))
enemies = spawn_enemies(nenemies, maph, size, posx, posy, level/2)
hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
exit2, damage_mod, blood_scale = 1, 1, 1
mape, minimap = np.zeros((size, size)), np.zeros((size, size, 3))
sounds['healthup'].play()
else:
timer = timer + er/2
frame = new_frame(posx-0.2*np.cos(rot), posy-0.2*np.sin(rot), rot, frame, sky, floor, hres, halfvres,
mod, maph, size, wall, mapc, exitx, exity, nenemies, rotv, door, window, bwall, exit2)
surf = pg.surfarray.make_surface(frame)
mape = np.zeros((size, size))
health = player_health
enemies, player_health, mape = enemies_ai(posx, posy, enemies, maph, size, mape, swordsp, ticks, player_health, nenemies, level/3)
enemies = sort_sprites(posx-0.2*np.cos(rot), posy-0.2*np.sin(rot), rot, enemies, maph, size, er/3)
if exit2 == 0:
surf = draw_colonel(surf, colonel, posx-0.2*np.cos(rot), posy-0.2*np.sin(rot), exitx+0.5, exity+0.5,
hres, halfvres, rot, rotv, maph, size)
surf, en = draw_sprites(surf, sprites, enemies, spsize, hres, halfvres, ticks, sword, swordsp, rotv)
if int(swordsp) > 0 and damage_mod < 1:
blood_scale = blood_scale*(1 + 2*er)
scaled_blood = pg.transform.scale(blood, 4*blood_scale*blood_size*hres/800)
surf.blit(scaled_blood, np.asarray([hres/2, halfvres]) - 2*blood_scale*blood_size*hres/800)
surf = pg.transform.scale2x(surf)
surf = pg.transform.smoothscale(surf, (800, 600))
surf.blit(hearts2, (20,20))
if exit2 == 0:
minimap[int(posx)][int(posy)] = (50, 50, 255)
surfmap = pg.surfarray.make_surface(minimap.astype('uint8'))
surfmap = pg.transform.scale(surfmap, (size*5, size*5))
surf.blit(surfmap,(20, 50), special_flags=pg.BLEND_ADD)
minimap[int(posx)][int(posy)] = (100, 100, 0)
surf.blit(font.render(str(round(timer,1)), 1, (255, 255, 255)), (20, 525))
surf.blit(exits[exit2], (730,20))
screen.blit(surf, (0,0))
if health > player_health:
hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
sounds['hurt'].play()
if ticks - stepdelay > 2 and stepscount != posx + posy:
sounds['step'].play()
stepdelay = ticks
stepscount = posx + posy
if mape[int(posx)][int(posy)] > 0:
delaycontrol = max(0.3, 2/np.random.uniform(0.99, mape[int(posx)][int(posy)]))
if ticks - stepdelay2 > delaycontrol:
sounds['step2'].play()
stepdelay2 = ticks
if int(swordsp) > 0:
if swordsp == 1:
damage_mod = 1
while en >= 0 and enemies[en][3] < 10 and damage_mod > 0.4:
x = posx -0.2*np.cos(rot) + np.cos(rot + np.random.uniform(0, 0.05))/enemies[en][3]
y = posy -0.2*np.sin(rot) + np.sin(rot + np.random.uniform(0, 0.05))/enemies[en][3]
z = 0.5 + np.sin(rotv*-0.392699)/enemies[en][3]
dist2en = np.sqrt((enemies[en][0]-x)**2 + (enemies[en][1]-y)**2)
if dist2en < 0.1 and z > 0 and z < 0.07*enemies[en][5]:
if z > 0.05*enemies[en][5]:
enemies[en][8] = enemies[en][8] - np.random.uniform(0,2)*2
else:
enemies[en][8] = enemies[en][8] - np.random.uniform(0,2)
enemies[en][10] = ticks
x = enemies[en][0] + 0.1*np.cos(rot)
y = enemies[en][1] + 0.1*np.sin(rot)
if maph[int(x)][int(y)] == 0:
enemies[en][0]= (x + enemies[en][0])/2 # push back
enemies[en][1]= (y + enemies[en][1])/2
if damage_mod == 1:
blood_scale = enemies[en][3]
sounds['swoosh'].play()
if enemies[en][4]:
sounds['hitmonster2'].set_volume(min(1, enemies[en][3])*sfx_vol)
sounds['hitmonster2'].play()
else:
sounds['hitmonster'].set_volume(min(1, enemies[en][3])*sfx_vol)
sounds['hitmonster'].play()
damage_mod = damage_mod*0.5
if enemies[en][8] < 0:
sounds['deadmonster'].set_volume(min(1, enemies[en][3])*sfx_vol)
sounds['deadmonster'].play()
nenemies = nenemies - 1
if nenemies == 0:
exit2, msg = 0, "Find the master!"
## if np.random.uniform(0,1) < 0.3:
## player_health = min(player_health+0.5, 20)
## hearts2 = pg.Surface.subsurface(hearts,(0,0,player_health*10,20))
## sounds['healthup'].play()
en = en - 1
if damage_mod == 1:
sounds['swoosh2'].play()
swordsp = (swordsp + er*10)%4
fps = int(clock.get_fps())
pg.display.set_caption("Health: "+str(round(player_health, 1))+" Enemies: " + str(nenemies) + " FPS: " + str(fps)+ ' ' + msg)
posx, posy, rot, rotv = movement(pg.key.get_pressed(), posx, posy, rot, maph, er, rotv)
pg.mouse.set_pos(400,300)
def movement(pressed_keys, posx, posy, rot, maph, et, rotv):
x, y, diag = posx, posy, 0
if pg.mouse.get_focused():
p_mouse = pg.mouse.get_pos()
rot = rot + np.clip((p_mouse[0]-400)/200, -0.2, .2)
rotv = rotv + np.clip((p_mouse[1]-300)/200, -0.2, .2)
rotv = np.clip(rotv, -0.999, .999)
if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:
x, y, diag = x + et*np.cos(rot), y + et*np.sin(rot), 1
elif pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:
x, y, diag = x - et*np.cos(rot), y - et*np.sin(rot), 1
if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:
et = et/(diag+1)
x, y = x + et*np.sin(rot), y - et*np.cos(rot)
elif pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:
et = et/(diag+1)
x, y = x - et*np.sin(rot), y + et*np.cos(rot)
posx, posy = check_walls(posx, posy, maph, x, y)
return posx, posy, rot, rotv
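# Note: `diag` flags that a forward/backward key is already held; the strafe
# step is then divided by (diag + 1), a cheap stand-in for normalising the
# movement vector (otherwise diagonal motion would be sqrt(2) times faster).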
def gen_map(size):
mapc = np.random.uniform(0,1, (size,size,3))
maph = np.random.choice([0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4], (size,size))
maph[0,:] = np.random.choice([1, 2, 3, 4], size)
maph[size-1,:] = np.random.choice([1, 2, 3, 4], size)
maph[:,0] = np.random.choice([1, 2, 3, 4], size)
maph[:,size-1] = np.random.choice([1, 2, 3, 4], size)
posx, posy = np.random.randint(1, size -2)+0.5, np.random.randint(1, size -2)+0.5
rot, rotv, stepscount = np.pi/4, 0, posx + posy
x, y = int(posx), int(posy)
maph[x][y] = 0
count = 0
while True:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if maph[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
maph[x][y] = 0
dtx = np.sqrt((x-posx)**2 + (y-posy)**2)
if (dtx > size*.6 and np.random.uniform() > .999) or np.random.uniform() > .99999:
exitx, exity = (x, y)
break
else:
count = count+1
return posx, posy, rot, rotv, maph, mapc, exitx, exity, stepscount
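# gen_map carves the level with a drunkard's walk: starting from the player's
# cell it repeatedly steps one tile in a random direction, clearing each cell
# it lands on (and tunnelling through walls once several moves in a row were
# rejected, via count > 5). The exit is placed probabilistically once the walk
# has wandered roughly 0.6*size away, so the level is connected by construction.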
def load_map(level):
mapc = pg.surfarray.array3d(pg.image.load('Assets/Levels/map'+str(level)+'.png'))
size = len(mapc)
maph = np.random.choice([1, 2, 3, 4], (size,size))
colors = np.asarray([[0,0,0], [255,255,255], [127,127,127]])
posx, exitx = None, None
for i in range(size):
for j in range(size):
color = mapc[i][j]
if (color == colors[0]).all() or (color == colors[1]).all() or (color == colors[2]).all():
maph[i][j] = 0
if (color == colors[1]).all():
posx, posy = i+0.5, j+0.5
if (color == colors[2]).all():
exitx, exity = i, j
while posx is None: # if no start is found
x, y = np.random.randint(1, size), np.random.randint(1, size)
if (mapc[x][y] == colors[0]).all():
posx, posy = x+0.5, y+0.5
while exitx is None: # if no exit is found
x, y = np.random.randint(1, size), np.random.randint(1, size)
if (mapc[x][y] == colors[0]).all():
exitx, exity = x, y
rot, rotv, stepscount = np.pi/4, 0, posx + posy
return posx, posy, rot, rotv, maph, mapc/255, exitx, exity, stepscount, size
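# Map-image convention used above: black pixels are walkable floor, the white
# pixel is the player start, mid-grey (127,127,127) is the exit, and any other
# colour stays a wall whose RGB doubles as its tint in mapc. A missing start
# or exit is patched by sampling random black (floor) cells.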
@njit(cache=True)
def new_frame(posx, posy, rot, frame, sky, floor, hres, halfvres, mod, maph, size, wall, mapc,
exitx, exity, nenemies, rotv, door, window, bwall, exit2):
offset = -int(halfvres*rotv)
for i in range(hres):
rot_i = rot + np.deg2rad(i/mod - 30)
sin, cos, cos2 = np.sin(rot_i), np.cos(rot_i), np.cos(np.deg2rad(i/mod - 30))
frame[i][:] = sky[int(np.rad2deg(rot_i)*2%720)][halfvres-offset:3*halfvres-offset]
n = 0
n2 = 0
x, y = posx +0.2*cos, posy +0.2*sin
for j in range(2000):
x, y = x +0.01*cos, y +0.01*sin
if n == 0 and maph[int(x)%(size-1)][int(y)%(size-1)] != 0: # found lower wall
n = np.sqrt((x-posx)**2+(y-posy)**2)
if maph[int(x)%(size-1)][int(y)%(size-1)] == 2:# found upper wall
n2 = np.sqrt((x-posx)**2+(y-posy)**2)
h = halfvres/(n2*cos2 + 0.001)
break
cwall = wall
if n2 > 0.5 and 3*h > int(halfvres/(n*cos2 + 0.000001)): #draw upper wall
xx = int(x*3%1*99)
xxx = x%1
if x%1 < 0.01 or x%1 > 0.99:
xx = int(y*3%1*99)
xxx = y%1
yy = np.linspace(0, 3, int(h*2))*99%99
shade = 0.3 + 0.7*(h/halfvres)
if shade > 1:
shade = 1
if maph[int(x-0.02)%(size-1)][int(y-0.02)%(size-1)] != 0:
shade = shade*0.8
c = shade*mapc[int(x)%(size-1)][int(y)%(size-1)]
if n2 > 3.5:
cwall = bwall
for k in range(int(h)*2):
c2 = c*cwall[xx][int(yy[k])]
h1 = int(halfvres - int(h) +k +offset -2*h +3)
h2 = int(halfvres+3*h-k+offset-1 +2*h - 6)
if xxx > 1/3 and xxx < 2/3 and k > h*2/3 and k < h*4/3:
c2 = shade*window[xx][int(yy[k])]
if h1 >= 0 and h1 < 2*halfvres:
frame[i][h1] = c2
if h2 < halfvres*2:
frame[i][h2] = c2
if n == 0:
n = 1000
x, y = posx +n*cos, posy +n*sin
walltype = maph[int(x)%(size-1)][int(y)%(size-1)]
cwall = wall
if n > 3.5:
cwall = bwall
h = int(halfvres/(n*cos2 + 0.000001))
xx = int(x*3%1*99)
xxx = x%1
if x%1 < 0.01 or x%1 > 0.99:
xx = int(y*3%1*99)
xxx = y%1
yy = np.linspace(0, 3, int(h*2))*99%99
shade = 0.4 + 0.6*(h/halfvres)
if shade > 1:
shade = 1
ash = 0
if maph[int(x-0.33)%(size-1)][int(y-0.33)%(size-1)] != 0:
ash = 1
if maph[int(x-0.01)%(size-1)][int(y-0.01)%(size-1)] != 0:
shade, ash = shade*0.7, 0
c = mapc[int(x)%(size-1)][int(y)%(size-1)]
cdoor = np.sqrt(np.ones(3))
import numpy as np
import scipy.sparse.linalg as sla
import scipy.sparse as sp
try:
import torch
from torch.autograd import Variable
except:
pass
import re
from abc import ABCMeta, abstractmethod
def block(rows, dtype=None, arrtype=None):
if (not _is_list_or_tup(rows)) or len(rows) == 0 or \
np.any([not _is_list_or_tup(row) for row in rows]):
raise RuntimeError('''
Unexpected input: Expected a non-empty list of lists.
If you are interested in helping expand the functionality
for your use case please send in an issue or PR at
http://github.com/bamos/block''')
rowLens = [len(row) for row in rows]
if len(np.unique(rowLens)) > 1:
raise RuntimeError('''
Unexpected input: Rows are not the same length.
Row lengths: {}'''.format(rowLens))
nRows = len(rows)
nCols = rowLens[0]
rowSizes = np.zeros(nRows, dtype=int)
colSizes = np.zeros(nCols, dtype=int)
backend = _get_backend(rows, dtype, arrtype)
for i, row in enumerate(rows):
for j, elem in enumerate(row):
if backend.is_complete(elem):
rowSz, colSz = backend.extract_shape(elem)
rowSizes[i] = rowSz
colSizes[j] = colSz
elif hasattr(elem, 'shape'):
rowSz, colSz = elem.shape
rowSizes[i] = rowSz
colSizes[j] = colSz
elif hasattr(elem, 'size'):
rowSz, colSz = elem.size()
rowSizes[i] = rowSz
colSizes[j] = colSz
cRows = []
for row, rowSz in zip(rows, rowSizes):
rowSz = int(rowSz)
if rowSz == 0:
continue
cCol = []
for elem, colSz in zip(row, colSizes):
colSz = int(colSz)
if colSz == 0:
continue
# TODO: Check types.
if backend.is_complete(elem):
cElem = elem
elif isinstance(elem, float) or isinstance(elem, int):
cElem = backend.build_full((rowSz, colSz), elem)
elif isinstance(elem, str):
if elem == 'I':
assert(rowSz == colSz)
cElem = backend.build_eye(rowSz)
elif elem == '-I':
assert(rowSz == colSz)
cElem = -backend.build_eye(rowSz)
else:
assert(False)
else:
cElem = backend.convert(elem)
cCol.append(cElem)
cRows.append(cCol)
return backend.build(cRows)
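# Usage sketch (A and B are any compatible numpy arrays you supply):
#   A = np.random.randn(3, 3)
#   B = np.random.randn(3, 2)
#   M = block([[A,   B  ],
#              [B.T, 'I']])   # -> 5x5: 'I' expands to eye(2), sizes inferred
# Scalars broadcast to full blocks, 0-sized rows/columns are dropped, and the
# backend (numpy / torch / LinearOperator) is picked from the elements.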
def block_diag(elems, dtype=None, arrtype=None):
n = len(elems)
return block([[0] * i + [elem] + [0] * (n - 1 - i)
for i, elem in enumerate(elems)],
dtype=dtype, arrtype=arrtype)
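# e.g. block_diag([A, B, C]) expands to
# block([[A, 0, 0], [0, B, 0], [0, 0, C]]), each zero block sized automatically.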
def block_tridiag(main, upper, lower):
n = len(main)
assert len(main) == len(upper) + 1
assert len(main) == len(lower) + 1
mat = ()
for i in range(n):
tup = ()
for j in range(n):
if (i==j): tup = (*tup, main[i])
elif (i==j-1): tup = (*tup, upper[i])  # upper diagonal at row i
elif (i==j+1): tup = (*tup, lower[i-1])
else: tup = (*tup,0)
mat = (*mat,tup)
return block(mat)
def _is_list_or_tup(x):
return isinstance(x, list) or isinstance(x, tuple)
def _get_backend(rows, dtype, arrtype):
if arrtype == np.ndarray and dtype is not None:
return NumpyBackend(dtype, arrtype)
elif arrtype == sla.LinearOperator:
return LinearOperatorBackend(dtype)
elif arrtype is not None and re.search(r'torch\..*Tensor', repr(arrtype)):
return TorchBackend(dtype)
elif arrtype is not None and re.search(r'torch\..*(Variable|Parameter)', repr(arrtype)):
return TorchVariableBackend(dtype)
else:
npb = NumpyBackend()
tb = TorchBackend()
lob = LinearOperatorBackend()
tvb = TorchVariableBackend()
for row in rows:
for elem in row:
if npb.is_complete(elem) and elem.size > 0:
if dtype is None:
dtype = type(elem[0, 0])
if arrtype is None:
arrtype = type(elem)
return NumpyBackend(dtype, arrtype)
elif tb.is_complete(elem):
return TorchBackend(type(elem))
elif lob.is_complete(elem):
return LinearOperatorBackend(elem.dtype)
elif tvb.is_complete(elem):
return TorchVariableBackend(type(elem.data))
assert(False)
class Backend():
__metaclass__ = ABCMeta
@abstractmethod
def extract_shape(self, x): pass
@abstractmethod
def build_eye(self, n): pass
@abstractmethod
def build_full(self, shape, fill_val): pass
@abstractmethod
def convert(self, x): pass
@abstractmethod
def build(self, rows): pass
@abstractmethod
def is_complete(self, rows): pass
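# Each backend answers the same six questions for its array type: read a
# shape, build an identity, build a constant block, coerce foreign elements,
# recognise its own arrays (is_complete), and stitch nested lists into the
# final matrix (build).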
class NumpyBackend(Backend):
def __init__(self, dtype=None, arrtype=None):
self.dtype = dtype
self.arrtype = arrtype
def extract_shape(self, x):
return x.shape
def build_eye(self, n):
return np.eye(n)
def build_full(self, shape, fill_val):
return np.full(shape, fill_val, self.dtype)
def convert(self, x):
assert(False)
def build(self, rows):
return np.bmat(rows)
def is_complete(self, x):
return isinstance(x, np.ndarray)
class TorchBackend(Backend):
def __init__(self, dtype=None):
self.dtype = dtype
def extract_shape(self, x):
return x.size()
def build_eye(self, n):
return torch.eye(n).type(self.dtype)
def build_full(self, shape, fill_val):
return fill_val * torch.ones(*shape).type(self.dtype)
def convert(self, x):
assert(False)
def build(self, rows):
compRows = []
for row in rows:
compRows.append(torch.cat(row, 1))
return torch.cat(compRows)
def is_complete(self, x):
return (re.search(r'torch\..*Tensor', str(x.__class__)) is not None) \
and x.ndimension() == 2
class TorchVariableBackend(TorchBackend):
def build_eye(self, n):
return Variable(super().build_eye(n))
def build_full(self, shape, fill_val):
return Variable(super().build_full(shape, fill_val))
def convert(self, x):
if TorchBackend.is_complete(self, x):
return Variable(x)
assert(False)
def is_complete(self, x):
return re.search(r'torch\..*(Variable|Parameter)', str(x.__class__))
class LinearOperatorBackend(Backend):
def __init__(self, dtype=None):
self.dtype = dtype
def extract_shape(self, x):
return x.shape
def build_eye(self, n):
def identity(v): return v
return sla.LinearOperator(shape=(n, n),
matvec=identity,
rmatvec=identity,
matmat=identity,
dtype=self.dtype)
def build_full(self, shape, fill_val):
m, n = shape
if fill_val == 0:
return shape
else:
def matvec(v):
return v.sum() * fill_val * np.ones(m)
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import copy
from flearn.models.client import Client
from flearn.utils.model_utils import Metrics
from flearn.utils.tf_utils import process_grad, cosine_sim, softmax, norm_grad, norm_grad_sparse, l2_clip, get_stdev
from flearn.utils.model_utils import batch_data, gen_batch, gen_epoch, gen_batch_celeba
from flearn.utils.language_utils import letter_to_vec, word_to_indices
class Server(object):
def __init__(self, params, learner, dataset):
# transfer parameters to self
for key, val in params.items():
setattr(self, key, val)
self.inner_opt = tf.train.GradientDescentOptimizer(params['learning_rate'])
# create worker nodes
tf.reset_default_graph()
self.client_model = learner(*params['model_params'], self.q, self.inner_opt, self.seed)
self.clients = self.setup_clients(dataset, self.dynamic_lam, self.client_model)
print('{} Clients in Total'.format(len(self.clients)))
self.latest_model = copy.deepcopy(self.client_model.get_params()) # self.latest_model is the global model
self.local_models = []
self.interpolation = []
self.global_model = copy.deepcopy(self.latest_model)
for _ in self.clients:
self.local_models.append(copy.deepcopy(self.latest_model))
self.interpolation.append(copy.deepcopy(self.latest_model))
# initialize system metrics
self.metrics = Metrics(self.clients, params)
def __del__(self):
self.client_model.close()
def setup_clients(self, dataset, dynamic_lam, model=None):
'''instantiates clients based on given train and test data directories
Return:
list of Clients
'''
users, groups, train_data, test_data = dataset
if len(groups) == 0:
groups = [None for _ in users]
all_clients = [Client(u, g, train_data[u], test_data[u], dynamic_lam, model) for u, g in zip(users, groups)]
return all_clients
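# `dataset` is the 4-tuple (users, groups, train_data, test_data) produced by
# the flearn data readers; every user id becomes a Client, and all clients
# share the single TF model instance held in self.client_model.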
def train_error(self, models):
num_samples = []
tot_correct = []
losses = []
for idx, c in enumerate(self.clients):
self.client_model.set_params(models[idx])
ct, cl, ns = c.train_error()
tot_correct.append(ct * 1.0)
num_samples.append(ns)
losses.append(cl*1.0)
return np.array(num_samples), np.array(tot_correct), np.array(losses)
def test(self, models):
'''tests self.latest_model on given clients
'''
num_samples = []
tot_correct = []
losses = []
for idx, c in enumerate(self.clients):
self.client_model.set_params(models[idx])
ct, cl, ns = c.test()
tot_correct.append(ct * 1.0)
num_samples.append(ns)
losses.append(cl * 1.0)
return np.array(num_samples), np.array(tot_correct), np.array(losses)
"""
## Function translated automatically using 'matlab.to.r()'
## Author: <NAME>, <NAME>
"""
import numpy as np
from project.cell import cell
from project.size import size
from project.zeros import zeros
from project.feval import feval
from project.LinMatrixL import LinMatrixL
from project.LinMatrixH import LinMatrixH
from project.LinMatrixLH import LinMatrixLH
from project.LinMatrixL_occ import LinMatrixL_occ
from project.hessian_eta_complex import hessian_eta_complex
from project.trace_matrix import trace_matrix
from project.diag_matlab import diag_matlab
def v(model_switch,xt_ind,x,a,bpop,b_ind,bocc_ind,d,sigma,docc,poped_db):
# number of samples X number of samples (per individual)
bUseAutoCorrelation = len(poped_db["model"]["auto_pointer"]) != 0
bUseFullSigmaCorrelation = False
if poped_db["settings"]["m2_switch"][1] == 0 or poped_db["settings"]["m2_switch"][1] == 1:
returnArgs = LinMatrixL(model_switch,xt_ind,x,a,bpop,b_ind,bocc_ind,poped_db)
l = returnArgs[0]
poped_db = returnArgs[1]
returnArgs = LinMatrixH(model_switch,xt_ind,x,a,bpop,b_ind,bocc_ind,poped_db)
h = returnArgs[0]
poped_db = returnArgs[1]
ret = zeros(0,1)
if len(sigma) != 0 and bUseFullSigmaCorrelation: #Update sigma to be fully correlated
for i in range(0, size(sigma)[0]):
for j in range(0, size(sigma)[0]):
if i != j:
sigma[i,j] = np.sqrt(sigma[i,i]*sigma[j,j])
#Add all IIV
if len(d) != 0:
ret = np.matmul(np.matmul(l, d), np.transpose(l))
else:
ret = zeros(len(xt_ind), len(xt_ind))
if poped_db["settings"]["bUseSecondOrder"]:
var_eta = zeros(1, len(xt_ind))
for o in range(0,len(xt_ind)):
hessian_eta = hessian_eta_complex(model_switch,xt_ind[o],x,a,bpop,b_ind,bocc_ind,poped_db)
var_eta[o] = 1/4 * trace_matrix(hessian_eta * d * (2 * hessian_eta) * d)
ret = ret + diag_matlab(var_eta)
locc = cell(1, poped_db["parameters"]["NumOcc"])
#Add all occasion variability
for i in range(0, poped_db["parameters"]["NumOcc"]):
if poped_db["parameters"]["NumOcc"] == 0:
continue
returnArgs = LinMatrixL_occ(model_switch,xt_ind,x,a,bpop,b_ind,bocc_ind,i,poped_db)
locc_tmp = returnArgs[0]
poped_db = returnArgs[1]
if len(ret) == 0:
ret = np.matmul(np.matmul(locc_tmp,docc), np.transpose(locc_tmp))
else:
ret = ret + np.matmul(np.matmul(locc_tmp,docc), np.transpose(locc_tmp))
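# Sketch of the covariance assembled so far (PopED's first-order
# approximation, names as in the code above):
#   V = L*D*L^T + sum_j L_occ_j * docc * L_occ_j^T
# i.e. between-subject plus between-occasion variability; the residual-error
# term derived from h and sigma completes V in the full PopED derivation.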
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
from .layers.contrastive import ContrastiveLoss
from .layers.utils import l1norm, l2norm
from .layers.img_enc import EncoderImage
from .layers.txt_enc import EncoderText
class VisualSA(nn.Layer):
"""
Build global image representations by self-attention.
Args: - local: local region embeddings, shape: (batch_size, 36, 1024)
- raw_global: raw image by averaging regions, shape: (batch_size, 1024)
Returns: - new_global: final image by self-attention, shape: (batch_size, 1024).
"""
def __init__(self, embed_dim, dropout_rate, num_region):
super(VisualSA, self).__init__()
self.embedding_local = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.BatchNorm1D(num_region),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_global = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.BatchNorm1D(embed_dim),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))
self.init_weights()
def init_weights(self):
for embeddings in self.children():
for m in embeddings:
if isinstance(m, nn.Linear):
r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
v = np.random.uniform(-r, r, size=(m.weight.shape[0], m.weight.shape[1])).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(v)
m.bias.set_value(b)
elif isinstance(m, nn.BatchNorm1D):
a = np.ones(m.weight.shape).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(a)
m.bias.set_value(b)
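# The uniform bound r = sqrt(6 / (fan_in + fan_out)) above is Glorot/Xavier
# initialisation; BatchNorm layers start as the identity (weight 1, bias 0).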
def forward(self, local, raw_global):
# compute embedding of local regions and raw global image
l_emb = self.embedding_local(local)
g_emb = self.embedding_global(raw_global)
# compute the normalized weights, shape: (batch_size, 36)
g_emb = paddle.concat([g_emb.unsqueeze(1) for _ in range(l_emb.shape[1])], axis=1)
common = paddle.multiply(l_emb, g_emb)
weights = self.embedding_common(common).squeeze(2)
weights = F.softmax(weights, axis=1)
# compute final image, shape: (batch_size, 1024)
new_global = (weights.unsqueeze(2) * local).sum(axis=1)
new_global = l2norm(new_global, dim=-1)
return new_global
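# Usage sketch (shapes follow the class docstring; inputs are illustrative):
#   sa = VisualSA(embed_dim=1024, dropout_rate=0.4, num_region=36)
#   local = paddle.randn([8, 36, 1024])      # region features
#   g = sa(local, local.mean(axis=1))        # -> [8, 1024], L2-normalised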
class TextSA(nn.Layer):
"""
Build global text representations by self-attention.
Args: - local: local word embeddings, shape: (batch_size, L, 1024)
- raw_global: raw text by averaging words, shape: (batch_size, 1024)
Returns: - new_global: final text by self-attention, shape: (batch_size, 1024).
"""
def __init__(self, embed_dim, dropout_rate):
super(TextSA, self).__init__()
self.embedding_local = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_global = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))
self.init_weights()
def init_weights(self):
for embeddings in self.children():
for m in embeddings:
if isinstance(m, nn.Linear):
r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
v = np.random.uniform(-r, r, size=(m.weight.shape[0], m.weight.shape[1])).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(v)
m.bias.set_value(b)
import types
import numpy as np
import sklearn
import torch
from sklearn.linear_model import RANSACRegressor
from utils.iou3d_nms import iou3d_nms_utils
from utils import kitti_util
def cart2hom(pts_3d):
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1), dtype=np.float32)))
return pts_3d_hom
def transform_points(pts_3d_ref, Tr):
pts_3d_ref = cart2hom(pts_3d_ref) # nx4
return np.dot(pts_3d_ref, np.transpose(Tr)).reshape(-1, 4)[:, 0:3]
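# Note: the reshape(-1, 4) above implies Tr is a full 4x4 homogeneous
# transform; cart2hom appends the column of ones so Tr's translation applies.
# Example (hedged, KITTI-style): pts_cam = transform_points(pts_velo[:, :3], Tr)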
def load_velo_scan(velo_filename):
scan = np.fromfile(velo_filename, dtype=np.float32)
scan = scan.reshape((-1, 4))
return scan
def load_plane(plane_filename):
with open(plane_filename, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
def estimate_plane(origin_ptc, max_hs=-1.5, it=1, ptc_range=((-20, 70), (-20, 20))):
mask = (origin_ptc[:, 2] < max_hs) & \
(origin_ptc[:, 0] > ptc_range[0][0]) & \
(origin_ptc[:, 0] < ptc_range[0][1]) & \
(origin_ptc[:, 1] > ptc_range[1][0]) & \
(origin_ptc[:, 1] < ptc_range[1][1])
for _ in range(it):
ptc = origin_ptc[mask]
reg = RANSACRegressor().fit(ptc[:, [0, 1]], ptc[:, 2])
w = np.zeros(3)
w[0] = reg.estimator_.coef_[0]
w[1] = reg.estimator_.coef_[1]
w[2] = -1.0
h = reg.estimator_.intercept_
norm = np.linalg.norm(w)
w /= norm
h = h / norm
result = np.array((w[0], w[1], w[2], h))
result *= -1
mask = np.logical_not(above_plane(
origin_ptc[:, :3], result, offset=0.2))
return result
def above_plane(ptc, plane, offset=0.05, only_range=((-30, 30), (-30, 30))):
mask = distance_to_plane(ptc, plane, directional=True) < offset
if only_range is not None:
range_mask = (ptc[:, 0] < only_range[0][1]) * (ptc[:, 0] > only_range[0][0]) * \
(ptc[:, 1] < only_range[1][1]) * (ptc[:, 1] > only_range[1][0])
mask *= range_mask
return np.logical_not(mask)
def distance_to_plane(ptc, plane, directional=False):
d = ptc @ plane[:3] + plane[3]
if not directional:
d = np.abs(d)
d /= np.sqrt((plane[:3]**2).sum())
return d
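# Minimal synthetic check (not part of the original utils): fit the ground
# plane for points scattered around z = -1.6 inside the (x, y) range that
# estimate_plane's RANSAC mask keeps.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _xy = _rng.uniform(-10, 10, size=(500, 2))
    _z = -1.6 + 0.01 * _rng.randn(500)
    _ptc = np.hstack([_xy, _z[:, None], np.ones((500, 1))])  # x, y, z, intensity
    print(estimate_plane(_ptc))  # roughly [0, 0, 1, 1.6]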
import numpy as np
from scipy.spatial import ConvexHull
def minimum_bounding_rectangle(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
https://stackoverflow.com/questions/13542855/algorithm-to-find-the-minimum-area-rectangle-for-given-points-in-order-to-comput
:param points: an nx2 matrix of coordinates
:rval: an nx2 matrix of coordinates
"""
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = np.zeros((len(hull_points)-1, 2))
edges = hull_points[1:] - hull_points[:-1]
angles = np.zeros((len(edges)))
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
    best_idx = np.argmin(areas)
    # return the best box; this completion follows the Stack Overflow answer
    # cited in the docstring (the snippet is truncated at this point)
    x1 = max_x[best_idx]
    x2 = min_x[best_idx]
    y1 = max_y[best_idx]
    y2 = min_y[best_idx]
    r = rotations[best_idx]
    rval = np.zeros((4, 2))
    rval[0] = np.dot([x1, y2], r)
    rval[1] = np.dot([x2, y2], r)
    rval[2] = np.dot([x2, y1], r)
    rval[3] = np.dot([x1, y1], r)
    return rval
"""Sequence-to-sequence metrics"""
from typing import Dict, List
import math
from scipy.stats import pearsonr
import numpy as np
import logging
from overrides import overrides
import torch
from allennlp.training.metrics import Metric
logger = logging.getLogger(__name__)
@Metric.register("decomp")
class DecompAttrMetrics(Metric):
def __init__(self,
node_pearson_r: float = 0.0,
node_pearson_f1: float = 0.0,
edge_pearson_r: float = 0.0,
edge_pearson_f1: float = 0.0,
pearson_r: float = 0.0,
pearson_f1: float = 0.0) -> None:
self.node_pearson_r = node_pearson_r
self.node_pearson_f1 = node_pearson_f1
self.n_node_attrs = 0.
self.edge_pearson_r = edge_pearson_r
self.edge_pearson_f1 = edge_pearson_f1
self.n_edge_attrs = 0.
self.pearson_r = pearson_r
self.pearson_f1 = pearson_f1
@overrides
def __call__(self,
pred_attr: torch.Tensor,
pred_mask: torch.Tensor,
true_attr: torch.Tensor,
true_mask: torch.Tensor,
node_or_edge: str
) -> None:
# Attributes
pred_attr, pred_mask, true_attr, true_mask = self.unwrap_to_tensors(pred_attr, pred_mask, true_attr, true_mask)
        if node_or_edge != "both":
pred_mask = torch.gt(pred_mask, 0)
true_mask = torch.gt(true_mask, 0)
pred_attr = pred_attr * true_mask
true_attr = true_attr * true_mask
# for train time pearson, only look where attributes predicted
pred_attr = pred_attr[true_mask==1]
true_attr = true_attr[true_mask==1]
#flat_pred = (pred_attr * pred_mask).reshape((-1)).cpu().detach().numpy()
flat_pred = pred_attr.reshape(-1).cpu().detach().numpy()
flat_true = true_attr.reshape(-1).cpu().detach().numpy()
flat_mask = true_mask.reshape(-1).cpu().detach().numpy()
try:
pearson_r, __ = pearsonr(flat_pred, flat_true)
except ValueError:
pearson_r = 0.0
flat_pred_threshed = np.greater(flat_pred, 0.0)
flat_true_threshed = np.greater(flat_true, 0.0)
#tot = flat_true.shape[0]
tot = torch.sum(true_mask.float()).item()
tot_pred = np.sum(flat_pred_threshed)
        tot_true = np.sum(flat_true_threshed)
import os
import sys
import torch
import random
import numpy as np
import pandas as pd
from cqr import helper
from datasets import datasets
from sklearn import linear_model
from cqr.nonconformist.nc import NcFactory
from cqr.nonconformist.nc import RegressorNc
from cqr.nonconformist.nc import AbsErrorErrFunc
from cqr.nonconformist.nc import QuantileRegErrFunc
from cqr.nonconformist.nc import RegressorNormalizer
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from cqr.nonconformist.nc import QuantileRegAsymmetricErrFunc
pd.set_option('precision', 3)
base_dataset_path = './datasets/'
if os.path.isdir('/scratch'):
local_machine = 0
else:
local_machine = 1
if local_machine:
base_dataset_path = '/Users/romano/mydata/regression_data/'
else:
base_dataset_path = '/scratch/users/yromano/data/regression_data/'
plot_results = False
def run_experiment(dataset_name,
test_method,
random_state_train_test,
save_to_csv=True):
""" Estimate prediction intervals and print the average length and coverage
Parameters
----------
dataset_name : array of strings, list of datasets
test_method : string, method to be tested, estimating
the 90% prediction interval
random_state_train_test : integer, random seed to be used
save_to_csv : boolean, save average length and coverage to csv (True)
or not (False)
"""
dataset_name_vec = []
method_vec = []
coverage_vec = []
length_vec = []
seed_vec = []
seed = random_state_train_test
random.seed(seed)
    np.random.seed(seed)
import pickle
from datetime import datetime
import numpy as np
import pandas as pd
import xarray as xr
import yaml
from vmodel.util.util import clean_attrs
def generate_filename(args):
# Construct output file name
time_str = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
fnamedict = {
'agents': args.num_agents,
'runs': args.num_runs,
'times': args.num_timesteps,
'dist': args.ref_distance,
'perc': args.perception_radius,
'topo': args.max_agents,
'rngstd': args.range_std,
}
formatexts = {'netcdf': 'nc', 'pickle': 'pkl'}
args_str = '_'.join(f'{k}_{v}' for k, v in fnamedict.items())
return f'{time_str}_{args_str}.states.{formatexts[args.format]}'
def create_dataset(datas, args):
ds = xr.Dataset()
# Clean up attrs dict to be compatible with YAML and NETCDF
ds.attrs = clean_attrs(vars(args))
time = np.array(datas[0].time)
pos = np.array([d.pos for d in datas])
vel = np.array([d.vel for d in datas])
coord_run = np.arange(args.num_runs, dtype=int) + 1
coord_time = pd.to_timedelta(time, unit='s')
coord_agent = np.arange(args.num_agents, dtype=int) + 1
    coord_space = np.array(['x', 'y'])
import unittest
import numpy as np
import jax
import jax.numpy as jnp
import functools
from timemachine.potentials import bonded, nonbonded, gbsa
from timemachine.lib import ops, custom_ops
from hilbertcurve.hilbertcurve import HilbertCurve
def prepare_gbsa_system(
x,
P_charges,
P_radii,
P_scale_factors,
alpha,
beta,
gamma,
dielectric_offset,
surface_tension,
solute_dielectric,
solvent_dielectric,
probe_radius,
cutoff_radii,
cutoff_force,
params=None,
precision=np.float64):
N = x.shape[0]
D = x.shape[1]
if params is None:
params = np.array([], dtype=np.float64)
# charges
charge_params = (np.random.rand(P_charges).astype(np.float64)-0.5)*np.sqrt(138.935456)
charge_param_idxs = np.random.randint(low=0, high=P_charges, size=(N), dtype=np.int32) + len(params)
params = np.concatenate([params, charge_params])
# gb radiis
radii_params = 1.5*np.random.rand(P_radii).astype(np.float64) + 1.0 # 1.0 to 2.5
radii_params = radii_params/10 # convert to nm form
radii_param_idxs = np.random.randint(low=0, high=P_radii, size=(N), dtype=np.int32) + len(params)
params = np.concatenate([params, radii_params])
# scale factors
scale_params = np.random.rand(P_scale_factors).astype(np.float64)/3 + 0.75
scale_param_idxs = np.random.randint(low=0, high=P_scale_factors, size=(N), dtype=np.int32) + len(params)
params = np.concatenate([params, scale_params])
custom_gb = ops.GBSA(
charge_param_idxs,
radii_param_idxs,
scale_param_idxs,
alpha,
beta,
gamma,
dielectric_offset,
surface_tension,
solute_dielectric,
solvent_dielectric,
probe_radius,
cutoff_radii,
cutoff_force,
D,
precision=precision
)
gbsa_obc_fn = functools.partial(
gbsa.gbsa_obc,
charge_idxs=charge_param_idxs,
radii_idxs=radii_param_idxs,
scale_idxs=scale_param_idxs,
alpha=alpha,
beta=beta,
gamma=gamma,
dielectric_offset=dielectric_offset,
surface_tension=surface_tension,
solute_dielectric=solute_dielectric,
solvent_dielectric=solvent_dielectric,
probe_radius=probe_radius,
cutoff_radii=cutoff_radii,
cutoff_force=cutoff_force
)
return params, [gbsa_obc_fn], [custom_gb]
def prepare_nonbonded_system(
x,
E, # number of exclusions
P_charges,
P_lj,
P_exc,
params=None,
p_scale=4.0,
e_scale=1.0,
cutoff=100.0,
custom_D=None,
precision=np.float64):
N = x.shape[0]
D = x.shape[1]
if params is None:
params = np.array([], dtype=np.float64)
charge_params = (np.random.rand(P_charges).astype(np.float64) - 0.5)*np.sqrt(138.935456)/e_scale
charge_param_idxs = np.random.randint(low=0, high=P_charges, size=(N), dtype=np.int32) + len(params)
params = np.concatenate([params, charge_params])
    # Lennard-Jones parameters
lj_sig_params = np.random.rand(P_lj)/p_scale # we want these to be pretty small for numerical stability
lj_sig_idxs = np.random.randint(low=0, high=P_lj, size=(N,), dtype=np.int32) + len(params)
params = np.concatenate([params, lj_sig_params])
lj_eps_params = np.random.rand(P_lj)
lj_eps_idxs = np.random.randint(low=0, high=P_lj, size=(N,), dtype=np.int32) + len(params)
params = np.concatenate([params, lj_eps_params])
lj_param_idxs = np.stack([lj_sig_idxs, lj_eps_idxs], axis=-1)
# generate exclusion parameters
exclusion_idxs = np.random.randint(low=0, high=N, size=(E,2), dtype=np.int32)
for e_idx, (i,j) in enumerate(exclusion_idxs):
if i == j:
exclusion_idxs[e_idx][0] = i
exclusion_idxs[e_idx][1] = (j+1) % N # mod is in case we overflow
for e_idx, (i,j) in enumerate(exclusion_idxs):
if i == j:
raise Exception("BAD")
exclusion_params = np.random.rand(P_exc).astype(np.float64) # must be between 0 and 1
exclusion_charge_idxs = np.random.randint(low=0, high=P_exc, size=(E), dtype=np.int32) + len(params)
exclusion_lj_idxs = np.random.randint(low=0, high=P_exc, size=(E), dtype=np.int32) + len(params)
params = np.concatenate([params, exclusion_params])
if custom_D is None:
custom_D = D
custom_nonbonded = ops.Nonbonded(
charge_param_idxs,
lj_param_idxs,
exclusion_idxs,
exclusion_charge_idxs,
exclusion_lj_idxs,
cutoff,
custom_D,
precision=precision
)
lj_fn = functools.partial(nonbonded.lennard_jones, box=None, param_idxs=lj_param_idxs, cutoff=cutoff)
lj_fn_exc = functools.partial(nonbonded.lennard_jones_exclusion, box=None, param_idxs=lj_param_idxs, cutoff=cutoff, exclusions=exclusion_idxs, exclusion_scale_idxs=exclusion_lj_idxs)
es_fn = functools.partial(nonbonded.simple_energy, param_idxs=charge_param_idxs, cutoff=cutoff, exclusions=exclusion_idxs, exclusion_scale_idxs=exclusion_charge_idxs)
def ref_total_energy(x, p):
return lj_fn(x, p) - lj_fn_exc(x, p) + es_fn(x, p)
return params, [ref_total_energy], [custom_nonbonded]
def prepare_bonded_system(
x,
P_bonds,
P_angles,
P_torsions,
B,
A,
T,
precision):
N = x.shape[0]
D = x.shape[1]
atom_idxs = np.arange(N)
    params = np.array([], dtype=np.float64)
bond_params = np.random.rand(P_bonds).astype(np.float64)
bond_param_idxs = np.random.randint(low=0, high=P_bonds, size=(B,2), dtype=np.int32) + len(params)
bond_idxs = []
for _ in range(B):
bond_idxs.append(np.random.choice(atom_idxs, size=2, replace=False))
    bond_idxs = np.array(bond_idxs, dtype=np.int32)
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 07:45:38 2021
@author: <NAME>
"""
import os
import random
import numpy as np
from skimage.morphology import erosion, disk
from scipy.ndimage import rotate
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img
class Pollen(keras.utils.Sequence):
"""Helper to iterate over the data (as Numpy arrays)."""
def __init__(self, batch_size,
img_size, input_img_paths,
target_img_paths1,target_img_paths2,
augment=True,junk_value=1):
self.batch_size = batch_size
self.img_size = img_size
self.input_img_paths = input_img_paths
self.target_img_paths1 = target_img_paths1
self.target_img_paths2 = target_img_paths2
self.augment = augment
self.junk_value = junk_value
def __len__(self):
return len(self.target_img_paths1) // self.batch_size
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
i = idx * self.batch_size
batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
batch_target_img_paths1 = self.target_img_paths1[i : i + self.batch_size]
batch_target_img_paths2 = self.target_img_paths2[i : i + self.batch_size]
x, y, w = self.__data_generation(batch_input_img_paths,
batch_target_img_paths1,
batch_target_img_paths2)
return x, y
def __data_generation(self,
batch_input_path,
batch_target_img_paths1,
batch_target_img_paths2):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
x = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
y = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
w = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
for i, _ in enumerate(batch_input_path):
img = load_img(batch_input_path[i], target_size=self.img_size,color_mode="grayscale")
img1 = load_img(batch_target_img_paths1[i], target_size=self.img_size, color_mode="grayscale")
img2 = load_img(batch_target_img_paths2[i], target_size=self.img_size, color_mode="grayscale")
flipud, fliplr, rotate_angle = 0, 0 ,0
if self.augment:
flipud = np.random.random(1) > 0.5
fliplr = np.random.random(1) > 0.5
if np.random.random(1) > 0.5:
rotate_angle = np.random.randint(0,360,1)[0]
else:
rotate_angle = 0
temp_x = self.augment_f(img,flipud,fliplr,rotate_angle)
temp_y1 = self.augment_f(img1,flipud,fliplr,rotate_angle)
temp_y2 = self.augment_f(img2,flipud,fliplr,rotate_angle)
temp_y1 = temp_y1 > 128
temp_y2 = temp_y2 > 128
temp_y = temp_y1 * 2 + temp_y2 * self.junk_value
x[i,:,:,0] = temp_x
y[i,:,:,0] = temp_y
w += 0.1
w[np.where(y>0)]=1
w[np.where(y>1)]=2
return tf.convert_to_tensor(x/255.), tf.convert_to_tensor(y), tf.convert_to_tensor(w)
def augment_f(self,img,flipud,fliplr,rotate_angle):
temp_x = np.array(img)
if rotate_angle:
temp_x = np.around(rotate(temp_x,rotate_angle,reshape=False))
temp_x[np.where(temp_x<0)] = 0
if flipud:
temp_x = np.flip(temp_x,axis=0)
if fliplr:
temp_x = np.flip(temp_x,axis=1)
return temp_x
def on_epoch_end(self):
seed = np.random.randint(12345)
random.Random(seed).shuffle(self.input_img_paths)
random.Random(seed).shuffle(self.target_img_paths1)
random.Random(seed).shuffle(self.target_img_paths2)
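# Illustration (not in the original file) of the label encoding built in
# __data_generation above: binary pollen/junk masks combine into class ids.
if __name__ == '__main__':
    _m1 = np.array([[1, 0], [0, 0]], dtype=bool)   # pollen mask (temp_y1)
    _m2 = np.array([[0, 1], [0, 0]], dtype=bool)   # junk mask (temp_y2)
    _junk_value = 1
    print(_m1 * 2 + _m2 * _junk_value)  # [[2 1] [0 0]]: 2=pollen, 1=junk, 0=background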
class Pollen_synthetic(keras.utils.Sequence):
"""Helper to iterate over the data (as Numpy arrays)."""
def __init__(self,
batch_size,
step_per_epoch,
img_size,
input_img_paths,
value_dict,
validation=False):
self.batch_size = batch_size
self.step_per_epoch = step_per_epoch
self.img_size = img_size
self.input_img_paths = input_img_paths
self.value_dict = value_dict
self.validation = validation
def __len__(self):
return self.step_per_epoch
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
# Generate data
if self.validation:
random.seed(idx)
else:
random.seed(np.random.randint(0,913829128))
x, y, w = self.__data_generation(idx)
return x, y, w
def __data_generation(self, idx):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
i = 0
x = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
y = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
w = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
while i < self.batch_size:
part = random.randint(20,48)
image = np.zeros((1320,1640))
mask = np.zeros((1320,1640))
selection = self.get_pollens(self.input_img_paths,number_examples = part)
image,mask = self.add_pollen(image,mask,selection,self.value_dict)
x[i,:,:,0] = image[180:1320-180,180:1640-180]
y[i,:,:,0] = mask[180:1320-180,180:1640-180]
# y[i] = tf.keras.utils.to_categorical(mask[180:1320-180,180:1640-180], num_classes=20)
i+=1
w += 0.1
w[np.where(y>0)]=1
# w[np.where(y>1)]=2
return tf.convert_to_tensor(x/255.), tf.convert_to_tensor(y), tf.convert_to_tensor(w)
def get_pollens(self,pollen_dict,number_examples = 10):
keys = list(pollen_dict.keys())
ret_particles = []
while len(ret_particles) < number_examples:
key = np.random.choice(keys,)
ret_particles.append([key,random.choice(pollen_dict[key])])
# for i in range( np.random.randint(0,5)):
# ret_particles.append(["alternaria",random.choice(pollen_dict['alternaria'])])# Force to have at least one alternaria particle
return ret_particles
def add_pollen(self,current_image,current_mask,particles,value_dict):
for idx,particle in enumerate(particles):
key, path = particle
y_min = random.randint(0, 1280)
y_max = y_min + 360
x_min = random.randint(0, 960)
x_max = x_min + 360
img = load_img(path,
target_size=(360,360),
color_mode="grayscale")
img = np.array(img)
            if not self.validation:
                # self.augment here is the augment method below (always truthy),
                # so the original `if self.augment:` guard did nothing; dropped
                # to match Pollen_synthetic_v2
                flipud = np.random.random(1) > 0.5
                fliplr = np.random.random(1) > 0.5
                if np.random.random(1) > 0.75:
                    rotate_angle = np.random.randint(0,360,1)[0]
                else:
                    rotate_angle = 0
                img = self.augment(img,flipud,fliplr,rotate_angle)
mask = ( img > 0 )
reverse_mask = np.logical_not(mask)
value_mask = mask * value_dict[key]
current_image[x_min:x_max,y_min:y_max] = current_image[x_min:x_max,y_min:y_max] * reverse_mask + img
current_mask[x_min:x_max,y_min:y_max] = current_mask[x_min:x_max,y_min:y_max] * reverse_mask + value_mask
return current_image, current_mask
def augment(self,img,flipud,fliplr,rotate_angle):
temp_x = np.array(img)
if rotate_angle:
temp_x = np.around(rotate(temp_x,rotate_angle,reshape=False))
temp_x[np.where(temp_x<0)] = 0
if flipud:
temp_x = np.flip(temp_x,axis=0)
if fliplr:
temp_x = np.flip(temp_x,axis=1)
return temp_x
class Pollen_synthetic_inst(keras.utils.Sequence):
"""Helper to iterate over the data (as Numpy arrays)."""
def __init__(self,
batch_size,
step_per_epoch,
img_size,
input_img_paths,
value_dict,
validation=False):
self.batch_size = batch_size
self.step_per_epoch = step_per_epoch
self.img_size = img_size
self.input_img_paths = input_img_paths
self.value_dict = value_dict
self.validation = validation
def __len__(self):
return self.step_per_epoch
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
# Generate data
if self.validation:
random.seed(idx)
else:
random.seed(np.random.randint(0,913829128))
x, y, w = self.__data_generation(idx)
return x, y, w
def __data_generation(self, idx):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
i = 0
x = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
y_class = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
y_inst = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
w = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
while i < self.batch_size:
part = random.randint(48,64)
image = np.zeros((1320,1640))
mask_class = np.zeros((1320,1640))
mask_instance = np.zeros((1320,1640))
selection = self.get_pollens(self.input_img_paths,number_examples = part)
image,mask_class,mask_instance = self.add_pollen(image,mask_class,mask_instance,selection,self.value_dict)
x[i,:,:,0] = image[180:1320-180,180:1640-180]
y_class[i,:,:,0] = mask_class[180:1320-180,180:1640-180]
y_inst[i,:,:,0] = mask_instance[180:1320-180,180:1640-180]
i+=1
w += 0.5
w[np.where(y_class>0)]=1
# w[np.where(y>1)]=2
return tf.convert_to_tensor(x/255.),[ tf.convert_to_tensor(y_class),tf.convert_to_tensor(y_inst)], tf.convert_to_tensor(w)
def get_pollens(self,pollen_dict,number_examples = 10):
keys = list(pollen_dict.keys())
ret_particles = []
while len(ret_particles) < number_examples:
key = np.random.choice(keys,)
ret_particles.append([key,random.choice(pollen_dict[key])])
# for i in range( np.random.randint(0,5)):
# ret_particles.append(["alternaria",random.choice(pollen_dict['alternaria'])])# Force to have at least one alternaria particle
return ret_particles
def add_pollen(self,current_image,current_mask,mask_instance,particles,value_dict):
for idx,particle in enumerate(particles):
key, path = particle
y_min = random.randint(0, 1280)
y_max = y_min + 360
x_min = random.randint(0, 960)
x_max = x_min + 360
img = load_img(path,
target_size=(360,360),
color_mode="grayscale")
img = np.array(img)
            if not self.validation:
                # as above, `if self.augment:` tested a bound method and was
                # always true, so the redundant guard is dropped
                flipud = np.random.random(1) > 0.5
                fliplr = np.random.random(1) > 0.5
                if np.random.random(1) > 0.75:
                    rotate_angle = np.random.randint(0,360,1)[0]
                else:
                    rotate_angle = 0
                img = self.augment(img,flipud,fliplr,rotate_angle)
mask = ( img > 0 )
reverse_mask = np.logical_not(mask)
value_mask = mask * value_dict[key]
current_image[x_min:x_max,y_min:y_max] = current_image[x_min:x_max,y_min:y_max] * reverse_mask + img
current_mask[x_min:x_max,y_min:y_max] = current_mask[x_min:x_max,y_min:y_max] * reverse_mask + value_mask
mask_erroded = erosion(mask,selem=disk(5))
mask_instance[x_min:x_max,y_min:y_max] = mask_instance[x_min:x_max,y_min:y_max] * reverse_mask + mask_erroded
return current_image, current_mask,mask_instance
def augment(self,img,flipud,fliplr,rotate_angle):
temp_x = np.array(img)
if rotate_angle:
temp_x = np.around(rotate(temp_x,rotate_angle,reshape=False))
temp_x[np.where(temp_x<0)] = 0
if flipud:
temp_x = np.flip(temp_x,axis=0)
if fliplr:
temp_x = np.flip(temp_x,axis=1)
return temp_x
class Pollen_synthetic_v2(keras.utils.Sequence):
"""Helper to iterate over the data (as Numpy arrays)."""
def __init__(self,
batch_size,
step_per_epoch,
img_size,
input_img_paths,
value_dict,
background_path=None,
validation=False):
self.batch_size = batch_size
self.step_per_epoch = step_per_epoch
self.img_size = img_size
self.input_img_paths = input_img_paths
self.value_dict = value_dict
self.background_path = background_path
self.validation = validation
def __len__(self):
return self.step_per_epoch
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
# Generate data
if self.validation:
idx_seed = idx
else:
idx_seed = np.random.randint(0,913829128)
random.seed(idx_seed)
# np.random.seed(idx)
x, y, w = self.__data_generation(idx_seed)
return x, y, w
def __data_generation(self, idx):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
i = 0
x = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
y = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
w = np.zeros((self.batch_size,self.img_size[0],self.img_size[1],1))
random.seed(idx)
# np.random.seed(idx)
if self.background_path:
paths = [os.path.join(self.background_path,file_name) for file_name in os.listdir(self.background_path)]
while i < self.batch_size:
part = random.randint(20,48)
image = np.zeros((1320,1640))
mask = np.zeros((1320,1640))
if self.background_path and random.random() > 0.9 and not self.validation:
back_path = random.choice(paths)
background = load_img(back_path, target_size=(960,1280),color_mode="grayscale")
background = np.array(background)
flipud = random.random() > 0.5
fliplr = random.random() > 0.5
if random.random() > 0.75:
rotate_angle = random.randint(0,360)
else:
rotate_angle = 0
background = self.augment(background,flipud,fliplr,rotate_angle)
background_mask = background > 0
background_mask = background_mask * self.value_dict["junk"]
image[180:1320-180,180:1640-180] += background
mask[180:1320-180,180:1640-180] += background_mask
part = random.randint(8,20)
selection = self.get_pollens(self.input_img_paths,number_examples=part,seed=idx)
image,mask = self.add_pollen(image,mask,selection,self.value_dict,seed=idx)
x[i,:,:,0] = image[180:1320-180,180:1640-180]
y[i,:,:,0] = mask[180:1320-180,180:1640-180]
i+=1
w += 0.5
w[np.where(y>0)]=1
w[np.where(y==11)]=0.5
return tf.convert_to_tensor(x/255.), tf.convert_to_tensor(y), tf.convert_to_tensor(w)
def get_pollens(self,pollen_dict,number_examples = 10,seed=10):
random.seed(seed)
# np.random.seed(seed)
keys = list(pollen_dict.keys())
ret_particles = []
while len(ret_particles) < number_examples:
key = random.choice(keys,)
ret_particles.append([key,random.choice(pollen_dict[key])])
# for i in range(np.random.randint(0,5)):
# ret_particles.append(["alternaria",random.choice(pollen_dict['alternaria'])])# Force to have at least one alternaria particle
return ret_particles
def add_pollen(self,current_image,current_mask,particles,value_dict,seed=10):
random.seed(seed)
# np.random.seed(seed)
for idx,particle in enumerate(particles):
key, path = particle
y_min = random.randint(0, 1280)
y_max = y_min + 360
x_min = random.randint(0, 960)
x_max = x_min + 360
img = load_img(path,
target_size=(360,360),
color_mode="grayscale")
img = np.array(img)
if not self.validation:
flipud = random.random() > 0.5
fliplr = random.random() > 0.5
if random.random() > 0.75:
rotate_angle = random.randint(0,360)
else:
rotate_angle = 0
img = self.augment(img,flipud,fliplr,rotate_angle)
mask = ( img > 0 )
            reverse_mask = np.logical_not(mask)
from __future__ import print_function
import numpy as np
import unittest
import discretize
MESHTYPES = ['uniformTree', 'randomTree']
# MESHTYPES = ['randomTree']
call2 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, 1])
call3 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, 1], xyz[:, 2])
cart_row2 = lambda g, xfun, yfun: np.c_[call2(xfun, g), call2(yfun, g)]
cart_row3 = lambda g, xfun, yfun, zfun: np.c_[call3(xfun, g), call3(yfun, g), call3(zfun, g)]
cartF2 = lambda M, fx, fy: np.vstack((cart_row2(M.gridFx, fx, fy), cart_row2(M.gridFy, fx, fy)))
cartE2 = lambda M, ex, ey: np.vstack((cart_row2(M.gridEx, ex, ey), cart_row2(M.gridEy, ex, ey)))
cartF3 = lambda M, fx, fy, fz: np.vstack((cart_row3(M.gridFx, fx, fy, fz), cart_row3(M.gridFy, fx, fy, fz), cart_row3(M.gridFz, fx, fy, fz)))
cartE3 = lambda M, ex, ey, ez: np.vstack((cart_row3(M.gridEx, ex, ey, ez), cart_row3(M.gridEy, ex, ey, ez), cart_row3(M.gridEz, ex, ey, ez)))
# np.random.seed(None)
# np.random.seed(7)
class TestCellGrad2D(discretize.Tests.OrderTest):
name = "Cell Gradient 2D, using cellGradx and cellGrady"
meshTypes = MESHTYPES
meshDimension = 2
meshSizes = [8, 16]
# because of the averaging involved in the ghost point. u_b = (u_n + u_g)/2
expectedOrders = 1
def getError(self):
#Test function
sol = lambda x, y: np.cos(2*np.pi*x)*np.cos(2*np.pi*y)
fx = lambda x, y: -2*np.pi*np.sin(2*np.pi*x)*np.cos(2*np.pi*y)
fy = lambda x, y: -2*np.pi*np.sin(2*np.pi*y)*np.cos(2*np.pi*x)
phi = call2(sol, self.M.gridCC)
gradF = self.M.cellGrad * phi
Fc = cartF2(self.M, fx, fy)
gradF_ana = self.M.projectFaceVector(Fc)
err = np.linalg.norm((gradF-gradF_ana), np.inf)
return err
def test_order(self):
np.random.seed(7)
self.orderTest()
class TestCellGrad3D(discretize.Tests.OrderTest):
name = "Cell Gradient 3D, using cellGradx, cellGrady, and cellGradz"
meshTypes = MESHTYPES
meshDimension = 3
meshSizes = [8, 16]
# because of the averaging involved in the ghost point. u_b = (u_n + u_g)/2
expectedOrders = 1
def getError(self):
#Test function
sol = lambda x, y, z: np.cos(2*np.pi*x)*np.cos(2*np.pi*y)*np.cos(2*np.pi*z)
fx = lambda x, y, z: -2*np.pi*np.sin(2*np.pi*x)*np.cos(2*np.pi*y)*np.cos(2*np.pi*z)
fy = lambda x, y, z: -2*np.pi*np.cos(2*np.pi*x)*np.sin(2*np.pi*y)*np.cos(2*np.pi*z)
fz = lambda x, y, z: -2*np.pi*np.cos(2*np.pi*x)*np.cos(2*np.pi*y)*np.sin(2*np.pi*z)
phi = call3(sol, self.M.gridCC)
gradF = self.M.cellGrad * phi
Fc = cartF3(self.M, fx, fy, fz)
gradF_ana = self.M.projectFaceVector(Fc)
err = np.linalg.norm((gradF-gradF_ana), np.inf)
return err
def test_order(self):
np.random.seed(6)
self.orderTest()
class TestFaceDivxy2D(discretize.Tests.OrderTest):
name = "Face Divergence 2D, Testing faceDivx and faceDivy"
meshTypes = MESHTYPES
meshDimension = 2
meshSizes = [16, 32]
def getError(self):
#Test function
fx = lambda x, y: np.sin(2*np.pi*x)
fy = lambda x, y: np.sin(2*np.pi*y)
sol = lambda x, y: 2*np.pi*(np.cos(2*np.pi*x)+np.cos(2*np.pi*y))
Fx = call2(fx, self.M.gridFx)
Fy = call2(fy, self.M.gridFy)
divFx = self.M.faceDivx.dot(Fx)
divFy = self.M.faceDivy.dot(Fy)
divF = divFx + divFy
divF_ana = call2(sol, self.M.gridCC)
err = np.linalg.norm((divF-divF_ana), np.inf)
# self.M.plotImage(divF-divF_ana, show_it=True)
return err
def test_order(self):
np.random.seed(4)
self.orderTest()
class TestFaceDiv3D(discretize.Tests.OrderTest):
name = "Face Divergence 3D"
meshTypes = MESHTYPES
meshSizes = [8, 16, 32]
def getError(self):
fx = lambda x, y, z: np.sin(2*np.pi*x)
fy = lambda x, y, z: np.sin(2*np.pi*y)
fz = lambda x, y, z: np.sin(2*np.pi*z)
sol = lambda x, y, z: (2*np.pi*np.cos(2*np.pi*x)+2*np.pi*np.cos(2*np.pi*y)+2*np.pi*np.cos(2*np.pi*z))
Fc = cartF3(self.M, fx, fy, fz)
F = self.M.projectFaceVector(Fc)
divF = self.M.faceDiv.dot(F)
divF_ana = call3(sol, self.M.gridCC)
return np.linalg.norm((divF-divF_ana), np.inf)
def test_order(self):
np.random.seed(7)
self.orderTest()
class TestFaceDivxyz3D(discretize.Tests.OrderTest):
name = "Face Divergence 3D, Testing faceDivx, faceDivy, and faceDivz"
meshTypes = MESHTYPES
meshDimension = 3
meshSizes = [8, 16, 32]
def getError(self):
#Test function
fx = lambda x, y, z: np.sin(2*np.pi*x)
fy = lambda x, y, z: np.sin(2*np.pi*y)
fz = lambda x, y, z: np.sin(2*np.pi*z)
sol = lambda x, y, z: (2*np.pi*np.cos(2*np.pi*x)+2*np.pi*np.cos(2*np.pi*y)+2*np.pi*np.cos(2*np.pi*z))
Fx = call3(fx, self.M.gridFx)
Fy = call3(fy, self.M.gridFy)
Fz = call3(fz, self.M.gridFz)
divFx = self.M.faceDivx.dot(Fx)
divFy = self.M.faceDivy.dot(Fy)
divFz = self.M.faceDivz.dot(Fz)
divF = divFx + divFy + divFz
divF_ana = call3(sol, self.M.gridCC)
err = np.linalg.norm((divF-divF_ana), np.inf)
# self.M.plotImage(divF-divF_ana, show_it=True)
return err
def test_order(self):
np.random.seed(7)
self.orderTest()
class TestCurl(discretize.Tests.OrderTest):
name = "Curl"
meshTypes = ['notatreeTree', 'uniformTree'] #, 'randomTree']#, 'uniformTree']
meshSizes = [8, 16]#, 32]
expectedOrders = [2, 1] # This is due to linear interpolation in the Re projection
def getError(self):
# fun: i (cos(y)) + j (cos(z)) + k (cos(x))
# sol: i (sin(z)) + j (sin(x)) + k (sin(y))
funX = lambda x, y, z: np.cos(2*np.pi*y)
funY = lambda x, y, z: np.cos(2*np.pi*z)
funZ = lambda x, y, z: np.cos(2*np.pi*x)
solX = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*z)
solY = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*x)
        solZ = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*y)
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
from typing import Optional, Tuple
import numpy as np
import mxnet as mx
from gluonts.core.component import validated
from gluonts.mx import Tensor
from . import bijection
from .distribution import Distribution, _sample_multiple, getF
from .distribution_output import (
ArgProj,
DistributionOutput,
AffineTransformedDistribution,
)
from gluonts.mx.distribution.transformed_distribution import TransformedDistribution
sigma_minimum = 1e-3
# sort samples in quantile bins
def get_quantile(sorted_samples, q):
# sorted_samples has shape = (num_samples,batch_size,seq_len,1), dimension is fixed at this point
# sorted_samples = mx.nd.squeeze(sorted_samples, axis=-1) #remove dim axis which only has length 1
# same sample_idx *for each* batch_size and seq_len point.
num_samples = sorted_samples.shape[0]
sample_idx = int(np.round((num_samples - 1) * q)) # round up because y >= q_pred
return sorted_samples[sample_idx, :, :] # return dim is (batch_size, seq_len)
# compute quantile loss for single quantile
def quantile_loss(sorted_samples, y, q):
# sorted_samples has shape = (num_samples,batch_size,seq_len,1)
# q is a scalar
# I think get_quantile function is outside of the mxnet 'path'
# quantile_pred has shape = (batch_size,seq_len,1)
quantile_pred = get_quantile(sorted_samples, q) # shape = (batch_size, seq_len, 1)
assert (y.shape == quantile_pred.shape)
# return shape is (batch_size,seq_len,1)
return mx.nd.where(
y >= quantile_pred,
q * (y - quantile_pred), # if >=
(1 - q) * (quantile_pred - y)
)
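# Plain-Python illustration (not part of the original module) of the pinball
# loss computed by quantile_loss above, for a single scalar prediction.
if __name__ == '__main__':
    def _pinball(y, pred, q):
        return q * (y - pred) if y >= pred else (1 - q) * (pred - y)
    print(_pinball(2.0, 1.0, 0.9))  # under-prediction at q=0.9 costs 0.9
    print(_pinball(1.0, 2.0, 0.9))  # over-prediction at q=0.9 costs only 0.1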
def capacitance_tril(F, rank: Tensor, W: Tensor, D: Tensor) -> Tensor:
r"""
Parameters
----------
F
rank
W : (..., dim, rank)
D : (..., dim)
Returns
-------
the capacitance matrix :math:`I + W^T D^{-1} W`
"""
# (..., dim, rank)
Wt_D_inv_t = F.broadcast_div(W, D.expand_dims(axis=-1))
# (..., rank, rank)
K = F.linalg_gemm2(Wt_D_inv_t, W, transpose_a=True)
# (..., rank, rank)
Id = F.broadcast_mul(F.ones_like(K), F.eye(rank))
# (..., rank, rank)
return F.linalg.potrf(K + Id)
def log_det(F, batch_D: Tensor, batch_capacitance_tril: Tensor) -> Tensor:
r"""
Uses the matrix determinant lemma.
.. math::
\log|D + W W^T| = \log|C| + \log|D|,
where :math:`C` is the capacitance matrix :math:`I + W^T D^{-1} W`, to compute the log determinant.
Parameters
----------
F
batch_D
batch_capacitance_tril
Returns
-------
"""
log_D = batch_D.log().sum(axis=-1)
log_C = 2 * F.linalg.sumlogdiag(batch_capacitance_tril)
return log_C + log_D
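# Numpy sanity check (illustrative, not part of the module) of the matrix
# determinant lemma used by log_det: log|D + W W^T| = log|C| + log|D| with
# C = I + W^T D^{-1} W.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _W = _rng.randn(5, 2)
    _D = _rng.rand(5) + 1.0
    _lhs = np.linalg.slogdet(np.diag(_D) + _W @ _W.T)[1]
    _C = np.eye(2) + _W.T @ np.diag(1.0 / _D) @ _W
    _rhs = np.linalg.slogdet(_C)[1] + np.sum(np.log(_D))
    assert np.allclose(_lhs, _rhs)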
def mahalanobis_distance(
F, W: Tensor, D: Tensor, capacitance_tril: Tensor, x: Tensor
) -> Tensor:
r"""
Uses the Woodbury matrix identity
.. math::
(W W^T + D)^{-1} = D^{-1} - D^{-1} W C^{-1} W^T D^{-1},
where :math:`C` is the capacitance matrix :math:`I + W^T D^{-1} W`, to compute the squared
Mahalanobis distance :math:`x^T (W W^T + D)^{-1} x`.
Parameters
----------
F
W
(..., dim, rank)
D
(..., dim)
capacitance_tril
(..., rank, rank)
x
(..., dim)
Returns
-------
"""
xx = x.expand_dims(axis=-1)
# (..., rank, 1)
Wt_Dinv_x = F.linalg_gemm2(
F.broadcast_div(W, D.expand_dims(axis=-1)), xx, transpose_a=True
)
# compute x^T D^-1 x, (...,)
maholanobis_D_inv = F.broadcast_div(x.square(), D).sum(axis=-1)
# (..., rank)
L_inv_Wt_Dinv_x = F.linalg_trsm(capacitance_tril, Wt_Dinv_x).squeeze(
axis=-1
)
maholanobis_L = L_inv_Wt_Dinv_x.square().sum(axis=-1).squeeze()
return F.broadcast_minus(maholanobis_D_inv, maholanobis_L)
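# Companion numpy check of the Woodbury identity behind mahalanobis_distance:
# x^T (W W^T + D)^{-1} x via direct inversion vs. the capacitance-matrix form.
if __name__ == '__main__':
    _rng = np.random.RandomState(1)
    _W = _rng.randn(5, 2)
    _D = _rng.rand(5) + 1.0
    _x = _rng.randn(5)
    _direct = _x @ np.linalg.inv(np.diag(_D) + _W @ _W.T) @ _x
    _Dinv_x = _x / _D
    _C = np.eye(2) + _W.T @ np.diag(1.0 / _D) @ _W
    _v = _W.T @ _Dinv_x
    assert np.allclose(_direct, _x @ _Dinv_x - _v @ np.linalg.solve(_C, _v))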
def lowrank_log_likelihood(
rank: int, mu: Tensor, D: Tensor, W: Tensor, x: Tensor
) -> Tensor:
F = getF(mu)
dim = F.ones_like(mu).sum(axis=-1).max()
dim_factor = dim * math.log(2 * math.pi)
if W is not None:
batch_capacitance_tril = capacitance_tril(F=F, rank=rank, W=W, D=D)
log_det_factor = log_det(
F=F, batch_D=D, batch_capacitance_tril=batch_capacitance_tril
)
mahalanobis_factor = mahalanobis_distance(
F=F, W=W, D=D, capacitance_tril=batch_capacitance_tril, x=x - mu
)
else:
log_det_factor = D.log().sum(axis=-1)
x_centered = x - mu
mahalanobis_factor = F.broadcast_div(x_centered.square(), D).sum(axis=-1)
ll: Tensor = -0.5 * (
F.broadcast_add(dim_factor, log_det_factor) + mahalanobis_factor
)
return ll
class LowrankMultivariateGaussian(Distribution):
r"""
Multivariate Gaussian distribution, with covariance matrix parametrized
as the sum of a diagonal matrix and a low-rank matrix
.. math::
\Sigma = D + W W^T
When `W = None` the covariance matrix is just diagonal.
The implementation is strongly inspired from Pytorch:
https://github.com/pytorch/pytorch/blob/master/torch/distributions/lowrank_multivariate_normal.py.
Complexity to compute log_prob is :math:`O(dim * rank + rank^3)` per element.
Parameters
----------
dim
Dimension of the distribution's support
rank
Rank of W
mu
Mean tensor, of shape (..., dim)
D
Diagonal term in the covariance matrix, of shape (..., dim)
W
Low-rank factor in the covariance matrix, of shape (..., dim, rank)
"""
is_reparameterizable = True
@validated()
def __init__(
self, dim: int, rank: int, mu: Tensor, D: Tensor, W: Optional[Tensor] = None
) -> None:
self.dim = dim
self.rank = rank
self.mu = mu
self.D = D
self.W = W
self.Cov = None
@property
def F(self):
return getF(self.mu)
@property
def batch_shape(self) -> Tuple:
return self.mu.shape[:-1]
@property
def event_shape(self) -> Tuple:
return self.mu.shape[-1:]
@property
def event_dim(self) -> int:
return 1
def log_prob(self, x: Tensor) -> Tensor:
return lowrank_log_likelihood(
rank=self.rank, mu=self.mu, D=self.D, W=self.W, x=x
)
def crps(self, samples: Tensor, y: Tensor, quantiles=np.arange(0.1, 1.0, 0.1)) -> Tensor:
r"""
Compute the *continuous rank probability score* (CRPS) of `y` according
to the distribution.
Parameters
----------
samples
Tensor of shape `(*batch_shape, *event_shape)`.
y
Tensor of ground truth
Returns
-------
Tensor
Tensor of shape `batch_shape` containing the CRPS score,
according to the distribution, for each event in `x`.
"""
# y is ground truth. Has shape (batch_size, seq_len, m)
# samples has shape = (num_samples, batch_size, seq_len, m)
# sum over m axis, sum over T axis, sum over bs axis
# loss for single ground truth point across all dimensions
# loop through dimensions
losses = []
for d in range(samples.shape[-1]):
# dim of dim_slice = (num_samples,batch_size,seq_len,1)
dim_slice = mx.nd.slice_axis(samples, axis=-1, begin=d, end=d + 1)
# sort samples along sample axis. shape = (num_samples,batch_size,seq_len,1)
sorted_slice = mx.nd.sort(dim_slice, axis=0) # sort along sample axis (first axis)
# slice of y for dimension d. shape = (batch_size, seq_len,1)
y_slice = mx.nd.slice_axis(y, axis=-1, begin=d, end=d + 1)
# compute quantile loss, shape = (batch_size, seq_len, 1)
#qloss = mx.nd.zeros((y_slice.shape))
qlosses = []
for q in quantiles:
qlosses.append(quantile_loss(sorted_slice, y_slice, q))
#qloss = quantile_loss(sorted_slice, y_slice, .1)
qloss = mx.nd.stack(*qlosses, axis=-1) #shape = (batch_size, seq_len, 1, Q)
#take average
qloss = (1/len(qlosses)) * mx.nd.sum(qloss, axis=-1) #shape = (batch_size, seq_len,1)
# append qloss tensor
losses.append(mx.nd.squeeze(qloss)) # remove dummy last axis of dim_slice and append
loss = mx.nd.stack(*losses, axis=-1) # shape = (batch_size, seq_len,m)
#return mx.nd.sum(loss, axis=-1).expand_dims(-1) # shape = (batch_size, seq_len,1)
return loss #shape = (batch_size, seq_len,m)
@property
def mean(self) -> Tensor:
return self.mu
@property
def variance(self) -> Tensor:
assert self.dim is not None
F = self.F
if self.Cov is not None:
return self.Cov
# reshape to a matrix form (..., d, d)
D_matrix = self.D.expand_dims(-1) * F.eye(self.dim)
if self.W is not None:
W_matrix = F.linalg_gemm2(self.W, self.W, transpose_b=True)
self.Cov = D_matrix + W_matrix
else:
self.Cov = D_matrix
return self.Cov
def sample_rep(self, num_samples: Optional[int] = None, dtype=np.float32) -> Tensor:
r"""
Draw samples from the multivariate Gaussian distribution:
.. math::
s = \mu + D u + W v,
where :math:`u` and :math:`v` are standard normal samples.
Parameters
----------
num_samples
number of samples to be drawn.
dtype
Data-type of the samples.
Returns
-------
tensor with shape (num_samples, ..., dim)
"""
def s(mu: Tensor, D: Tensor, W: Optional[Tensor]=None) -> Tensor:
F = getF(mu)
samples_D = F.sample_normal(
mu=F.zeros_like(mu), sigma=F.ones_like(mu), dtype=dtype
)
cov_D = D.sqrt() * samples_D
if W is not None:
# dummy only use to get the shape (..., rank, 1)
dummy_tensor = F.linalg_gemm2(
W, mu.expand_dims(axis=-1), transpose_a=True
).squeeze(axis=-1)
samples_W = F.sample_normal(
mu=F.zeros_like(dummy_tensor),
sigma=F.ones_like(dummy_tensor),
dtype=dtype,
)
cov_W = F.linalg_gemm2(W, samples_W.expand_dims(axis=-1)).squeeze(
axis=-1
)
samples = mu + cov_D + cov_W
else:
samples = mu + cov_D
return samples
return _sample_multiple(
s, mu=self.mu, D=self.D, W=self.W, num_samples=num_samples
)
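# Usage sketch (assumes MXNet is installed; shapes follow the class docstring):
# a batch of two 3-d, rank-1 low-rank Gaussians.
if __name__ == '__main__':
    _mu = mx.nd.zeros((2, 3))
    _D = mx.nd.ones((2, 3))
    _W = 0.5 * mx.nd.ones((2, 3, 1))
    _dist = LowrankMultivariateGaussian(dim=3, rank=1, mu=_mu, D=_D, W=_W)
    print(_dist.log_prob(_mu))                    # density at the mean
    print(_dist.sample_rep(num_samples=4).shape)  # expected: (4, 2, 3)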
def inv_softplus(y):
if y < 20.0:
# y = log(1 + exp(x)) ==> x = log(exp(y) - 1)
        return np.log(np.exp(y) - 1.0)
    # assumed completion: for large y, softplus(y) ~= y, so the inverse is ~y
    return y
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 3 21:31:48 2019
@author: bill
This contains all the functions needed to execute the main NMF Analysis strategy as contained in the NMF_Analysis class.
"""
import pickle
import numpy as np
import scipy.sparse
from sklearn.decomposition import NMF
import sklearn.preprocessing
import scipy
'''
Modifications to H that ensure each topic is mapped to a unit vector in the term space.
'''
def norm_fun(vector):
return np.linalg.norm(vector) #Normalizing the vector to have a length of one in topic space.
def b_mat(H):
num_topics = np.shape(H)[0]
B = np.zeros((num_topics,num_topics), dtype = float)
B_inv = np.zeros((num_topics,num_topics), dtype = float)
for topic in range(num_topics):
norm = norm_fun(H[topic])
B[topic,topic] = 1/norm
B_inv[topic,topic] = norm
return B, B_inv
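# Small check of b_mat (illustrative): B rescales every topic (row of H) to
# unit length, and B_inv undoes the scaling.
if __name__ == '__main__':
    _H = np.abs(np.random.RandomState(0).randn(3, 6))
    _B, _B_inv = b_mat(_H)
    print(np.linalg.norm(_B @ _H, axis=1))        # ~[1. 1. 1.]
    print(np.allclose(_B_inv @ (_B @ _H), _H))    # True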
'''
The main function to run NMF on the desired number of topics.
'''
def run_ensemble_NMF_strategy(num_topics, num_folds, num_runs, num_docs, doc_term_matrix):
#Defines the number of elements in each fold and ensures that the total sums correctly
    fold_sizes = (num_docs // num_folds) * np.ones(num_folds, dtype=int)
import collections
import numpy as np
from sklearn.linear_model import LogisticRegression
import sklearn.metrics.pairwise
from . import cluster
class ActiveLearner:
"The general framework of batch-mode pool-based active learning algorithm."
def __init__(self, X, batch_size=20, initial_batch_size=None, classifier=LogisticRegression(max_iter=200)):
"The default classifier to be used is logistic regression"
self.batch_size = batch_size
self.initial_batch_size = (
initial_batch_size if initial_batch_size is not None
else batch_size)
self.n_batch = 0 # Starting the first batch
self.X = X # Training set, each row is a vectorized instance
self.y = np.zeros(X.shape[0]) # The labels, assuming initially unlabeled
        self.L = np.array([])
import sqlalchemy as db
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker, relationship, backref
from spt3g import core as spt3g_core
import so3g
import datetime as dt
import os
import re
from tqdm import tqdm
import numpy as np
import yaml
import ast
from collections import namedtuple
from enum import Enum
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from .. import core
from . import load as io_load
from sotodlib.io.g3tsmurf_db import (Base, Observations, Tags, Files, Tunes,
TuneSets, ChanAssignments, Channels,
FrameType, Frames)
Session = sessionmaker()
num_bias_lines = 16
"""
Actions used to define when observations happen
Could be expanded to other Action Based Indexing as well
Strings must be unique, in that they must only show up when they should be used
as observations
"""
SMURF_ACTIONS = {
'observations':[
'take_stream_data',
'stream_data_on',
'take_noise_psd',
'take_g3_data',
'stream_g3_on',
],
}
# Types of Frames we care about indexing
type_key = ['Observation', 'Wiring', 'Scan']
class TimingParadigm(Enum):
G3Timestream = 1
SmurfUnixTime = 2
TimingSystem = 3
Mixed = 4
def get_sample_timestamps(frame):
"""
Gets timestamps of samples in a G3Frame. This will try to get highest
precision first and move to lower precision methods if that fails.
Args
------
frame (spt3g_core.G3Frame):
A G3Frame(Scan) containing streamed detector data.
Returns
---------
times (np.ndarray):
numpy array containing timestamps in seconds
paradigm (TimingParadigm):
Paradigm used to calculate timestamps.
"""
logger.warning("get_sample_timestamps is deprecated, how did you get here?")
if 'primary' in frame.keys():
if False:
# Do high precision timing calculation here when we have real data
pass
else:
# Try to calculate the timestamp based on the SmurfProcessor's
# "UnixTime" and the G3Timestream start time. "UnixTime" is a
# 32-bit nanosecond clock that steadily increases mod 2**32.
unix_times = np.array(frame['primary']['UnixTime'])
for i in np.where(np.diff(unix_times) < 0)[0]:
# This corrects for any wrap around
unix_times[i+1:] += 2**32
times = frame['data'].start.time / spt3g_core.G3Units.s \
+ (unix_times - unix_times[0]) / 1e9
return times, TimingParadigm.SmurfUnixTime
else:
# Calculate timestamp based on G3Timestream.times(). Note that this
# only uses the timestream start and end time, and assumes samples are
# equispaced.
times = np.array([t.time / spt3g_core.G3Units.s
for t in frame['data'].times()])
return times, TimingParadigm.G3Timestream
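# Illustration of the 32-bit "UnixTime" wrap-around correction used above:
# wherever the counter decreases, add 2**32 to all later samples.
if __name__ == '__main__':
    _t = np.array([2**32 - 10, 2**32 - 5, 3, 8], dtype=np.int64)
    for _i in np.where(np.diff(_t) < 0)[0]:
        _t[_i + 1:] += 2**32
    print(np.diff(_t))  # monotone again: [5 8 5]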
class G3tSmurf:
def __init__(self, archive_path, db_path=None, meta_path=None,
echo=False):
if db_path is None:
db_path = os.path.join(archive_path, 'frames.db')
self.archive_path = archive_path
self.meta_path = meta_path
self.db_path = db_path
self.engine = db.create_engine(f"sqlite:///{db_path}", echo=echo)
Session.configure(bind=self.engine)
self.Session = sessionmaker(bind=self.engine)
Base.metadata.create_all(self.engine)
# Defines frame_types
self._create_frame_types()
def _create_frame_types(self):
session = self.Session()
if not session.query(FrameType).all():
print("Creating FrameType table...")
for k in type_key:
ft = FrameType(type_name=k)
session.add(ft)
session.commit()
@staticmethod
def _make_datetime(x):
"""
Takes an input (either a timestamp or datetime), and returns a datetime.
Intended to allow flexibility in inputs for various other functions
Args
----
x: input datetime of timestamp
Returns
----
datetime: datetime of x if x is a timestamp
"""
if np.issubdtype(type(x),np.floating) or np.issubdtype(type(x),np.integer):
return dt.datetime.utcfromtimestamp(x)
elif isinstance(x,np.datetime64):
return x.astype(dt.datetime)
elif isinstance(x,dt.datetime) or isinstance(x,dt.date):
return x
raise(Exception("Input not a datetime or timestamp"))
def add_file(self, path, session, overwrite=False):
"""
Indexes a single file and adds it to the sqlite database. Creates a
single entry in Files and as many Frame entries as there are frames in
the file.
Args
----
path: path
Path of the file to index
session : SQLAlchemy session
Current, active sqlalchemy session
overwrite : bool
If true and file exists in the database, update it.
"""
frame_types = {
ft.type_name: ft for ft in session.query(FrameType).all()
}
## name has a unique constraint in table
db_file = session.query(Files).filter(Files.name==path).one_or_none()
if db_file is None:
db_file = Files(name=path)
session.add(db_file)
elif not overwrite:
logger.info(f"File {path} found in database, use overwrite=True to update")
return
else:
logger.debug(f"File {path} found in database, updating entry and re-making frames")
db_frames = db_file.frames
            for frame in db_frames:
                session.delete(frame)
session.commit()
try:
splits = path.split('/')
db_file.stream_id = splits[-2]
        except Exception:
## should this fail silently?
pass
reader = so3g.G3IndexedReader(path)
total_channels = 0
file_start, file_stop = None, None
frame_idx = -1
while True:
try:
db_frame_offset = reader.Tell()
frames = reader.Process(None)
if not frames:
break
except RuntimeError as e:
logger.warning(f"Failed to add {path}: file likely corrupted")
session.rollback()
return
frame = frames[0]
frame_idx += 1
if str(frame.type) not in type_key:
continue
db_frame_frame_type = frame_types[str(frame.type)]
timestamp = frame['time'].time / spt3g_core.G3Units.s
db_frame_time = dt.datetime.utcfromtimestamp(timestamp)
if str(frame.type) != 'Wiring':
dump = False
else:
dump = bool(frame['dump'])
## only make Frame once the non-nullable fields are known
db_frame = Frames(frame_idx=frame_idx, file=db_file,
offset = db_frame_offset,
frame_type = db_frame_frame_type,
time = db_frame_time,
status_dump = dump,
)
data = frame.get('data')
sostream_version = frame.get('sostream_version', 0)
if data is not None:
if sostream_version >= 2: # Using SuperTimestreams
db_frame.n_channels = len(data.names)
db_frame.n_samples = len(data.times)
db_frame.start = dt.datetime.utcfromtimestamp(
data.times[0].time / spt3g_core.G3Units.s
)
db_frame.stop = dt.datetime.utcfromtimestamp(
data.times[-1].time / spt3g_core.G3Units.s
)
else:
db_frame.n_samples = data.n_samples
db_frame.n_channels = len(data)
db_frame.start = dt.datetime.utcfromtimestamp(data.start.time /spt3g_core.G3Units.s)
db_frame.stop = dt.datetime.utcfromtimestamp(data.stop.time /spt3g_core.G3Units.s)
if file_start is None:
file_start = db_frame.start
file_stop = db_frame.stop
total_channels = max(total_channels, db_frame.n_channels)
session.add(db_frame)
db_file.start = file_start
db_file.stop = file_stop
db_file.n_channels = total_channels
db_file.n_frames = frame_idx
def index_archive(self, verbose=False, stop_at_error=False,
skip_old_format=True, min_ctime=None, max_ctime=None):
"""
Adds all files from an archive to the File and Frame sqlite tables.
Files must be indexed before the metadata entries can be made.
Args
----
verbose: bool
Verbose mode
stop_at_error: bool
If True, will stop if there is an error indexing a file.
skip_old_format: bool
If True, will skip over indexing files before the name convention
was changed to be ctime_###.g3.
min_ctime: int, float, or None
If set, files with session-ids less than this ctime will be
skipped.
max_ctime: int, float, or None
If set, files with session-ids higher than this ctime will be
skipped.
"""
session = self.Session()
indexed_files = [f[0] for f in session.query(Files.name).all()]
files = []
for root, _, fs in os.walk(self.archive_path):
for f in fs:
path = os.path.join(root, f)
if path.endswith('.g3') and path not in indexed_files:
if skip_old_format and '2020-' in path:
continue
if '-' not in f and (min_ctime is not None):
# We know the filename is <ctime>_###.g3
session_id = int(f.split('_')[0])
if session_id < min_ctime:
continue
if '-' not in f and (max_ctime is not None):
# We know the filename is <ctime>_###.g3
session_id = int(f.split('_')[0])
if session_id > max_ctime:
continue
files.append(path)
if verbose:
print(f"Indexing {len(files)} files...")
for f in tqdm(sorted(files)[::-1]):
try:
self.add_file(os.path.join(root, f), session)
session.commit()
except IntegrityError as e:
# Database Integrity Errors, such as duplicate entries
session.rollback()
print(e)
except RuntimeError as e:
# End of stream errors, for G3Files that were not fully flushed
session.rollback()
print(f"Failed on file {f} due to end of stream error!")
except Exception as e:
# This will catch generic errors such as attempting to load
# out-of-date files that do not have the required frame
# structure specified in the TOD2MAPS docs.
session.rollback()
if stop_at_error:
raise e
elif verbose:
print(f"Failed on file {f}:\n{e}")
session.close()
def add_new_channel_assignment(self, stream_id, ctime, cha,
cha_path, session):
"""Add new entry to the Channel Assignments table. Called by the
index_metadata function.
Args
-------
stream_id : string
The stream id for the particular SMuRF slot
ctime : int
The ctime of the SMuRF action called to create the channel
assignemnt
cha : string
The file name of the channel assignment
cha_path : path
The absolute path to the channel assignment
session : SQLAlchemy Session
The active session
"""
        band = int(re.findall(r'b\d\.txt', cha)[0][1])
ch_assign = session.query(ChanAssignments).filter(
ChanAssignments.ctime== ctime,
ChanAssignments.stream_id== stream_id,
ChanAssignments.band== band
)
ch_assign = ch_assign.one_or_none()
if ch_assign is None:
ch_assign = ChanAssignments(ctime=ctime,
path=cha_path,
name=cha,
stream_id=stream_id,
band=band)
session.add(ch_assign)
notches = np.atleast_2d(np.genfromtxt(ch_assign.path, delimiter=','))
if np.sum([notches[:,2]!=-1]) != len(ch_assign.channels):
ch_made = [c.channel for c in ch_assign.channels]
for notch in notches:
## smurf did not assign a channel
if notch[2] == -1:
continue
if int(notch[2]) in ch_made:
continue
ch_name = 'sch_{}_{:10d}_{:01d}_{:03d}'.format(stream_id, ctime,
band, int(notch[2]))
ch = Channels(subband=int(notch[1]),
channel=int(notch[2]),
frequency=notch[0],
name=ch_name,
chan_assignment=ch_assign,
band=band)
if ch.channel == -1:
logger.warning(f"Un-assigned channel made in Channel Assignment {ch_assign.name}")
continue
check = session.query(Channels).filter(
Channels.ca_id == ch_assign.id,
Channels.channel == ch.channel).one_or_none()
if check is None:
session.add(ch)
session.commit()
def _assign_set_from_file(self, tune_path, ctime=None, stream_id=None,
session=None):
"""Build set of Channel Assignments that are (or should be) in the
tune file.
Args
-------
tune_path : path
The absolute path to the tune file
ctime : int
ctime of SMuRF Action
stream_id : string
The stream id for the particular SMuRF slot
session : SQLAlchemy Session
The active session
"""
if session is None:
session = self.Session()
if stream_id is None:
stream_id = tune_path.split('/')[-4]
if ctime is None:
ctime = int( tune_path.split('/')[-1].split('_')[0] )
data = np.load(tune_path, allow_pickle=True).item()
assign_set = []
### Determine the TuneSet
for band in data.keys():
if 'resonances' not in data[band]:
### tune file doesn't have info for this band
continue
## try to use tune file to find channel assignment before just
## assuming "most recent"
if 'channel_assignment' in data[band]:
cha_name = data[band]['channel_assignment'].split('/')[-1]
cha = session.query(ChanAssignments).filter(
ChanAssignments.stream_id==stream_id,
ChanAssignments.name==cha_name).one_or_none()
else:
cha = session.query(ChanAssignments).filter(
ChanAssignments.stream_id==stream_id,
ChanAssignments.ctime<= ctime,
ChanAssignments.band==band)
cha = cha.order_by(db.desc(ChanAssignments.ctime)).first()
if cha is None:
logger.error(f"Missing Channel Assignment for tune file {tune_path}")
continue
assign_set.append(cha)
return assign_set
def add_new_tuning(self, stream_id, ctime, tune_path, session):
"""Add new entry to the Tune table, check if needed to add to the
TunesSet table. Called by the index_metadata function.
Args
-------
stream_id : string
The stream id for the particular SMuRF slot
ctime : int
The ctime of the SMuRF action called to create the tuning file.
tune_path : path
The absolute path to the tune file
session : SQLAlchemy Session
The active session
"""
name = tune_path.split('/')[-1]
tune = session.query(Tunes).filter(Tunes.name == name,
Tunes.stream_id == stream_id).one_or_none()
if tune is None:
tune = Tunes(name=name, start=dt.datetime.utcfromtimestamp(ctime),
path=tune_path, stream_id=stream_id)
session.add(tune)
session.commit()
## assign set is the set of channel assignments that make up this tune
## file
assign_set = self._assign_set_from_file(tune_path, ctime=ctime,
stream_id=stream_id, session=session)
tuneset = None
tunesets = session.query(TuneSets)
## tunesets with any channel assignments matching list
tunesets = tunesets.filter( TuneSets.chan_assignments.any(
ChanAssignments.id.in_([a.id for a in assign_set]))).all()
if len(tunesets)>0:
for ts in tunesets:
if np.all( sorted([ca.id for ca in ts.chan_assignments]) ==
sorted([a.id for a in assign_set]) ):
tuneset = ts
if tuneset is None:
logger.debug(f"New Tuneset Detected {stream_id}, {ctime}, {[[a.name for a in assign_set]]}")
tuneset = TuneSets(name=name, path=tune_path, stream_id=stream_id,
start=dt.datetime.utcfromtimestamp(ctime))
session.add(tuneset)
session.commit()
## add a the assignments and channels to the detector set
for db_cha in assign_set:
tuneset.chan_assignments.append(db_cha)
for ch in db_cha.channels:
tuneset.channels.append(ch)
session.commit()
tune.tuneset = tuneset
session.commit()
def add_new_observation(self, stream_id, action_name, action_ctime, session,
max_early=5,max_wait=100):
"""Add new entry to the observation table. Called by the
index_metadata function.
Args
-------
stream_id : string
The stream id for the particular SMuRF slot
action_ctime : int
The ctime of the SMuRF action called to create the observation. Often
slightly different than the .g3 session ID
session : SQLAlchemy Session
The active session
max_early : int
Buffer time to allow the g3 file to be earlier than the smurf action
max_wait : int
Maximum amount of time between the streaming start action and the
making of .g3 files that belong to an observation
"""
## Check if observation exists already
obs = session.query(Observations).filter(
Observations.stream_id == stream_id,
Observations.action_ctime == action_ctime).one_or_none()
if obs is None:
x = session.query(Files.name)
x = x.filter(Files.start >= dt.datetime.utcfromtimestamp(action_ctime-max_early))
x = x.order_by(Files.start).first()
if x is None:
logger.debug(f"No .g3 files from Action {action_name} in {stream_id}"\
f" at {action_ctime}. Not Making Observation")
return
session_id = int( (x.name[:-3].split('/')[-1]).split('_')[0])
## Verify the files we found match with Observation
status = SmurfStatus.from_file(x.name)
if status.action is not None:
assert status.action == action_name
assert status.action_timestamp == action_ctime
## Verify inside of file matches the outside
reader = so3g.G3IndexedReader(x.name)
while True:
frames = reader.Process(None)
if not frames:
break
frame = frames[0]
if str(frame.type) == 'Observation':
assert frame['sostream_id'] == stream_id
assert frame['session_id'] == session_id
start = dt.datetime.utcfromtimestamp(frame['time'].time / spt3g_core.G3Units.s )
## Build Observation
obs = Observations(
obs_id=f"{stream_id}_{session_id}",
timestamp = session_id,
action_ctime = action_ctime,
action_name = action_name,
stream_id = stream_id,
start = start,
)
session.add(obs)
session.commit()
## obs.stop is only updated when streaming session is over
if obs.stop is None:
self.update_observation_files(obs, session, max_early=max_early,
max_wait=max_wait)
def update_observation_files(self, obs, session, max_early=5,
max_wait=100,force=False):
"""Update existing observation. A separate function to make it easier
to deal with partial data transfers. See add_new_observation for args
Args
-----
max_early : int
Buffer time to allow the g3 file to be earlier than the smurf action
max_wait : int
Maximum amount of time between the streaming start action and the
making of .g3 files that belong to an observation
session : SQLAlchemy Session
The active session
force : bool
If true, will recalculate file/tune information even if observation
appears complete
"""
if not force and obs.stop is not None:
return
x = session.query(Files.name).filter(
Files.start >= obs.start-dt.timedelta(seconds=max_early))
x = x.order_by(Files.start).first()
if x is None:
## no files to add at this point
return
x = x[0]
session_id, f_num = (x[:-3].split('/')[-1]).split('_')
prefix = '/'.join(x.split('/')[:-1])+'/'
if int(session_id)-obs.start.timestamp() > max_wait:
## we don't have .g3 files for some reason
return
flist = session.query(Files).filter(Files.name.like(prefix+session_id+'%'))
flist = flist.order_by(Files.start).all()
## Load Status Information
status = SmurfStatus.from_file(flist[0].name)
## Add any tags from the status
if len(status.tags)>0:
for tag in status.tags:
new_tag = Tags(obs_id = obs.obs_id, tag=tag)
obs.tag = ','.join(status.tags)
session.add(new_tag)
## Add Tune and Tuneset information
if status.tune is not None:
tune = session.query(Tunes).filter(
Tunes.name == status.tune).one_or_none()
if tune is None:
logger.warning(f"Tune {status.tune} not found in database, update error?")
tuneset = None
else:
tuneset = tune.tuneset
else:
tuneset = session.query(TuneSets).filter(TuneSets.start <= obs.start)
tuneset = tuneset.order_by(db.desc(TuneSets.start)).first()
already_have = [ts.id for ts in obs.tunesets]
if tuneset is not None:
if not tuneset.id in already_have:
obs.tunesets.append(tuneset)
obs_samps = 0
## Update file entries
for db_file in flist:
db_file.obs_id = obs.obs_id
if tuneset is not None:
db_file.detset = tuneset.name
## this is where I learned sqlite does not accept numpy 32 or 64 bit ints
file_samps = sum([fr.n_samples if fr.n_samples is not None else 0 for fr in db_file.frames])
db_file.sample_start = obs_samps
db_file.sample_stop = obs_samps + file_samps
obs_samps = obs_samps + file_samps + 1
obs_ended = False
## Search through file looking for stream closeout
reader = so3g.G3IndexedReader(flist[-1].name)
while True:
frames = reader.Process(None)
if not frames:
break
frame = frames[0]
if str(frame.type) == 'Wiring':
if 'AMCc.SmurfProcessor.FileWriter.IsOpen' in frame['status']:
status = {}
status.update(yaml.safe_load(frame['status']))
if not status['AMCc.SmurfProcessor.FileWriter.IsOpen']:
obs_ended = True
break
if obs_ended:
obs.n_samples = obs_samps-1
obs.duration = flist[-1].stop.timestamp() - flist[0].start.timestamp()
obs.stop = flist[-1].stop
session.commit()
def search_metadata_actions(self, min_ctime=16000*1e5, max_ctime=None,
reverse=False):
"""Generator used to page through smurf folder returning each action
formatted for easy use.
Args
-----
min_ctime : lowest timestamped action to return
max_ctime : highest timestamped action to return
reverse : if true, goes backward
Yields
-------
tuple (action, stream_id, ctime, path)
action : Smurf Action string with ctime removed for easy comparison
stream_id : stream_id of Action
ctime : ctime of Action folder
path : absolute path to action folder
"""
if max_ctime is None:
max_ctime = dt.datetime.now().timestamp()
if self.meta_path is None:
raise ValueError('Archiver needs meta_path attribute to index channel assignments')
logger.debug(f"Ignoring ctime folders below {int(min_ctime//1e5)}")
for ct_dir in sorted(os.listdir(self.meta_path), reverse=reverse):
if int(ct_dir) < int(min_ctime//1e5):
continue
elif int(ct_dir) > int(max_ctime//1e5):
continue
for stream_id in sorted(os.listdir( os.path.join(self.meta_path,ct_dir)), reverse=reverse):
action_path = os.path.join(self.meta_path, ct_dir, stream_id)
actions = sorted(os.listdir( action_path ), reverse=reverse)
for action in actions:
try:
ctime = int( action.split('_')[0] )
if ctime < min_ctime or ctime > max_ctime:
continue
astring = '_'.join(action.split('_')[1:])
yield (astring, stream_id, ctime, os.path.join(action_path, action))
except GeneratorExit:
return
except:
continue
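## Usage sketch (assumes `arc` is a G3tSmurf instance with meta_path set;
## the ctime is hypothetical). Walk the newest actions first:
## for action, sid, ctime, path in arc.search_metadata_actions(
## min_ctime=1.6e9, reverse=True):
## print(action, sid, ctime)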
def search_metadata_files(self, min_ctime=16000*1e5, max_ctime=None,
reverse=False, skip_plots=True, skip_configs=True):
"""Generator used to page through smurf folder returning each file
formatted for easy use.
Args
-----
min_ctime : int or float
Lowest timestamped action to return
max_ctime : int or float
Highest timestamped action to return
reverse : bool
if true, goes backward
skip_plots : bool
if true, skips all the plots folders because we probably don't want
to look through them
skip_configs : bool
if true, skips all the config folders because we probably don't want
to look through them
Yields
-------
tuple (fname, stream_id, ctime, abs_path)
fname : string
file name with ctime removed
stream_id : string
stream_id where the file is saved
ctime : int
file ctime
abs_path : string
absolute path to file
"""
for action, stream_id, actime, path in self.search_metadata_actions(
min_ctime=min_ctime, max_ctime=max_ctime,
reverse=reverse):
if skip_configs and action == 'config':
continue
adirs = os.listdir(path)
for adir in adirs:
if skip_plots and adir == 'plots':
continue
for root, dirs, files in os.walk(os.path.join(path, adir), topdown=False):
for name in files:
try:
try:
ctime = int(name.split('_')[0])
except ValueError:
ctime = actime
fname = '_'.join(name.split('_')[1:])
yield (fname, stream_id, ctime, os.path.join(root,name))
except GeneratorExit:
return
def _process_index_error(self, session, e, stream_id, ctime, path, stop_at_error):
if type(e) == ValueError:
logger.info(f"Value Error at {stream_id}, {ctime}, {path}")
elif type(e) == IntegrityError:
# Database Integrity Errors, such as duplicate entries
session.rollback()
logger.info(f"Integrity Error at {stream_id}, {ctime}, {path}")
else:
logger.info(f"Unexplained Error at {stream_id}, {ctime}, {path}")
if stop_at_error:
raise(e)
def index_channel_assignments(self, session, min_ctime=16000*1e5,
max_ctime=None,
pattern = 'channel_assignment',
stop_at_error=False):
""" Index all channel assignments newer than a minimum ctime
Args
-----
session : G3tSmurf session connection
min_ctime : int or float
minimum time for indexing
max_ctime : int, float, or None
maximum time for indexing
pattern : string
string pattern to look for channel assignments
"""
for fpattern, stream_id, ctime, path in self.search_metadata_files(min_ctime=min_ctime,
max_ctime=max_ctime):
if pattern in fpattern:
try:
## decide if this is the last channel assignment in the directory
## needed because we often get multiple channel assignments in the same folder
root = os.path.join('/',*path.split('/')[:-1])
cha_times = [int(f.split('_')[0]) for f in os.listdir(root) if pattern in f]
if ctime != np.max(cha_times):
continue
fname = path.split('/')[-1]
logger.debug(f"Add new channel assignment: {stream_id},{ctime}, {path}")
self.add_new_channel_assignment(stream_id, ctime,
fname, path, session)
except Exception as e:
self._process_index_error(session, e, stream_id, ctime, path, stop_at_error)
def index_tunes(self, session, min_ctime=16000*1e5, max_ctime=None,
pattern = 'tune.npy', stop_at_error=False):
""" Index all tune files newer than a minimum ctime
Args
-----
session : G3tSmurf session connection
min_ctime : int or float
minimum time for indexing
max_ctime : int, float, or None
maximum time for indexing
pattern : string
string pattern to look for tune files
"""
for fname, stream_id, ctime, path in self.search_metadata_files(min_ctime=min_ctime,
max_ctime=max_ctime):
if pattern in fname:
try:
logger.debug(f"Add new Tune: {stream_id}, {ctime}, {path}")
self.add_new_tuning(stream_id, ctime, path, session)
except Exception as e:
self._process_index_error(session, e, stream_id, ctime,
path, stop_at_error)
def index_observations(self, session, min_ctime=16000*1e5, max_ctime=None,
stop_at_error=False):
""" Index all observations newer than a minimum ctime. Uses
SMURF_ACTIONS to define which actions are observations.
Args
-----
session : G3tSmurf session connection
min_ctime : int or float
minimum time for indexing
max_ctime : int, float, or None
maximum time for indexing
"""
for action, stream_id, ctime, path in self.search_metadata_actions(min_ctime=min_ctime,
max_ctime=max_ctime):
if action in SMURF_ACTIONS['observations']:
try:
obs_path = os.listdir( os.path.join(path, 'outputs'))
logger.debug(f"Add new Observation: {stream_id}, {ctime}, {obs_path}")
self.add_new_observation(stream_id, action, ctime, session)
except Exception as e:
self._process_index_error(session, e, stream_id, ctime, path, stop_at_error)
def index_metadata(self, min_ctime=16000*1e5, max_ctime=None, stop_at_error=False):
"""Adds all channel assignments, tunes, and observations in archive to
database. Adding relevant entries to Files as well.
Args
----
min_ctime : int
Lowest ctime to start looking for new metadata
max_ctime : None or int
Highest ctime to look for new metadata
stop_at_error: bool
If True, will stop if there is an error indexing a file.
"""
if self.meta_path is None:
raise ValueError('Archiver needs meta_path attribute to index metadata')
session = self.Session()
logger.debug(f"Ignoring ctime folders below {int(min_ctime//1e5)}")
logger.debug("Indexing Channel Assignments")
self.index_channel_assignments(session, min_ctime=min_ctime,
max_ctime=max_ctime,
stop_at_error=stop_at_error)
logger.debug("Indexing Tune Files")
self.index_tunes(session, min_ctime=min_ctime,
max_ctime=max_ctime, stop_at_error=stop_at_error)
logger.debug("Indexing Observations")
self.index_observations(session, min_ctime=min_ctime,
max_ctime=max_ctime,stop_at_error=stop_at_error)
session.close()
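## Usage sketch (assumes `arc` is a G3tSmurf instance with meta_path set):
## arc.index_metadata(min_ctime=1.6e9)
## indexes all channel assignments, tune files, and observations with
## ctimes above 1.6e9 in a single pass.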
def _stream_ids_in_range(self, start, end):
"""
Returns a list of all stream-id's present in a given time range.
Skips 'None' because those only contain G3PipelineInfo frames.
Args
-----
start : timestamp or DateTime
start time for data
end : timestamp or DateTime
end time for data
Returns
--------
stream_ids: List of stream ids.
"""
session = self.Session()
start = self._make_datetime(start)
end = self._make_datetime(end)
all_ids = session.query(Files.stream_id).filter(
Files.start < end,
Files.stop >= start
).all()
sids = []
for sid, in all_ids:
if sid not in sids and sid != 'None':
sids.append(sid)
return sids
def load_data(self, start, end, stream_id=None, channels=None,
show_pb=True, load_biases=True, status=None,
short_labels=True):
"""
Loads smurf G3 data for a given time range. For the specified time range
this will return a chunk of data that includes that time range.
This function returns an AxisManager with the following properties::
* Axes:
* samps : samples
* dets : resonator channels reading out
* bias_lines (optional) : bias lines
* Fields:
* timestamps : (samps,)
unix timestamps for loaded data
* signal : (dets, samps)
Array of the squid phase in units of radians for each channel
* primary : AxisManager (samps,)
"primary" data included in the packet headers
'AveragingResetBits', 'Counter0', 'Counter1', 'Counter2',
'FluxRampIncrement', 'FluxRampOffset', 'FrameCounter',
'TESRelaySetting', 'UnixTime'
* biases (optional): (bias_lines, samps)
Bias values during the data
* ch_info : AxisManager (dets,)
Information about channels, including SMuRF band, channel,
frequency.
Args
-----
start : timestamp or DateTime
start time for data
end : timestamp or DateTime
end time for data
stream_id : String
stream_id to load, in case there are multiple
channels : list or None
If not None, it should be a list that can be sent to get_channel_mask.
short_labels : bool, optional
If True, shorter labels are used for the detector axis; if False the
labels are the full readout unique IDs.
show_pb : bool, optional:
If True, will show progress bar.
load_biases : bool, optional
If True, will return biases.
status : SmurfStatus, optional
If not None, will use this Status for the data load
Returns
--------
aman : AxisManager
AxisManager for the data
"""
session = self.Session()
start = self._make_datetime(start)
end = self._make_datetime(end)
if stream_id is None:
sids = self._stream_ids_in_range(start, end)
if len(sids) > 1:
raise ValueError(
"Multiple stream_ids exist in the given range! "
"Must choose one.\n"
f"stream_ids: {sids}"
)
q = session.query(Files).join(Frames).filter(Frames.stop >= start,
Frames.start < end,
Frames.type_name=='Scan')
if stream_id is not None:
q = q.filter(Files.stream_id == stream_id)
q = q.order_by(Files.start)
flist = np.unique([x.name for x in q.all()])
if stream_id is None:
stream_id = q[0].stream_id
if status is None:
scan_start = session.query(Frames.time).filter(Frames.time >= start,
Frames.type_name=='Scan')
scan_start = scan_start.order_by(Frames.time).first()
try:
status = self.load_status(scan_start[0], stream_id=stream_id)
except:
logger.info("Status load from database failed, using file load")
status = None
aman = load_file( flist, status=status, channels=channels,
archive=self, show_pb=show_pb, short_labels=short_labels)
msk = np.all([aman.timestamps >= start.timestamp(),
aman.timestamps < end.timestamp()], axis=0)
idx = np.where(msk)[0]
if len(idx) == 0:
logger.warning("No samples returned in time range")
aman.restrict('samps', (0, 0))
else:
aman.restrict('samps', (idx[0], idx[-1]))
session.close()
return aman
def load_status(self, time, stream_id=None, show_pb=False):
"""
Returns the status dict at specified unix timestamp.
Loads all status frames between session start frame and specified time.
Args:
time (timestamp): Time at which you want the rogue status
Returns:
status (SmurfStatus instance): object indexing of rogue variables
at specified time.
"""
return SmurfStatus.from_time(time, self, stream_id=stream_id,show_pb=show_pb)
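## Usage sketch for the loading interface above. The constructor arguments,
## times, and stream_id here are assumptions for illustration only:
## arc = G3tSmurf(archive_path, db_path, meta_path=meta_path)
## aman = arc.load_data(start, stop, stream_id='crate1slot2')
## aman.signal # (dets, samps) squid phase in radians
## aman.timestamps # (samps,) unix timestamps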
def dump_DetDb(archive, detdb_file):
"""
Take a G3tSmurf archive and create a DetDb of the type used with Context
Args
-----
archive : G3tSmurf instance
detdb_file : filename
"""
my_db = core.metadata.DetDb(map_file=detdb_file)
my_db.create_table('base', column_defs=[])
column_defs = [
"'band' int",
"'channel' int",
"'frequency' float",
"'chan_assignment' int",
]
my_db.create_table('smurf', column_defs=column_defs)
ddb_list = my_db.dets()['name']
session = archive.Session()
channels = session.query(Channels).all()
msk = np.where([ch.name not in ddb_list for ch in channels])[0].astype(int)
for ch in tqdm(np.array(channels)[msk]):
my_db.get_id( name=ch.name )
my_db.add_props('smurf', ch.name, band=ch.band,
channel=ch.channel, frequency=ch.frequency,
chan_assignment=ch.chan_assignment.ctime)
session.close()
return my_db
class SmurfStatus:
"""
This is a class that attempts to extract essential information from the
SMuRF status dictionary so it is more easily accessible. If the necessary
information for an attribute is not present in the dictionary, the
attribute will be set to None.
Args
-----
status : dict
A SMuRF status dictionary
Attributes
------------
status : dict
Full smurf status dictionary
num_chans: int
Number of channels that are streaming
mask : Optional[np.ndarray]
Array with length ``num_chans`` that describes the mapping
of readout channel to absolute smurf channel.
mask_inv : np.ndarray
Array with dimensions (NUM_BANDS, CHANS_PER_BAND) where
``mask_inv[band, chan]`` tells you the readout channel for a given
band, channel combination.
freq_map : Optional[np.ndarray]
An array of size (NUM_BANDS, CHANS_PER_BAND) that has the mapping
from (band, channel) to resonator frequency. If the mapping is not
present in the status dict, the array will be full of np.nan.
filter_a : Optional[np.ndarray]
The A parameter of the readout filter.
filter_b : Optional[np.ndarray]
The B parameter of the readout filter.
filter_gain : Optional[float]
The gain of the readout filter.
filter_order : Optional[int]
The order of the readout filter.
filter_enabled : Optional[bool]
True if the readout filter is enabled.
downsample_factor : Optional[int]
Downsampling factor
downsample_enabled : Optional[bool]
Whether downsampler is enabled
flux_ramp_rate_hz : float
Flux Ramp Rate calculated from the RampMaxCnt and the digitizer
frequency.
"""
NUM_BANDS = 8
CHANS_PER_BAND = 512
def __init__(self, status):
self.status = status
self.start = self.status.get('start')
self.stop = self.status.get('stop')
# Reads in useful status values as attributes
mapper_root = 'AMCc.SmurfProcessor.ChannelMapper'
self.num_chans = self.status.get(f'{mapper_root}.NumChannels')
# Tries to set values based on expected rogue tree
self.mask = self.status.get(f'{mapper_root}.Mask')
self.mask_inv = np.full((self.NUM_BANDS, self.CHANS_PER_BAND), -1)
if self.mask is not None:
self.mask = np.array(ast.literal_eval(self.mask))
# Creates inverse mapping
for i, chan in enumerate(self.mask):
b = chan // self.CHANS_PER_BAND
c = chan % self.CHANS_PER_BAND
self.mask_inv[b, c] = i
tune_root = 'AMCc.FpgaTopLevel.AppTop.AppCore.SysgenCryo.tuneFilePath'
self.tune = self.status.get(tune_root)
if self.tune is not None and len(self.tune)>0:
self.tune = self.tune.split('/')[-1]
pysmurf_root = 'AMCc.SmurfProcessor.SOStream'
self.action = self.status.get(f'{pysmurf_root}.pysmurf_action')
if self.action == '':
self.action = None
self.action_timestamp = self.status.get(f'{pysmurf_root}.pysmurf_action_timestamp')
if self.action_timestamp == 0:
self.action_timestamp = None
filter_root = 'AMCc.SmurfProcessor.Filter'
self.filter_a = self.status.get(f'{filter_root}.A')
if self.filter_a is not None:
self.filter_a = np.array(ast.literal_eval(self.filter_a))
self.filter_b = self.status.get(f'{filter_root}.B')
if self.filter_b is not None:
self.filter_b = np.array(ast.literal_eval(self.filter_b))
self.filter_gain = self.status.get(f'{filter_root}.Gain')
self.filter_order = self.status.get(f'{filter_root}.Order')
self.filter_enabled = not self.status.get(f'{filter_root}.Disable')
ds_root = 'AMCc.SmurfProcessor.Downsampler'
self.downsample_factor = self.status.get(f'{ds_root}.Factor')
self.downsample_enabled = not self.status.get(f'{ds_root}.Disable')
# Tries to make resonator frequency map
self.freq_map = np.full((self.NUM_BANDS, self.CHANS_PER_BAND), np.nan)
band_roots = [
f'AMCc.FpgaTopLevel.AppTop.AppCore.SysgenCryo.Base[{band}]'
for band in range(self.NUM_BANDS)]
for band in range(self.NUM_BANDS):
band_root = band_roots[band]
band_center = self.status.get(f'{band_root}.bandCenterMHz')
subband_offset = self.status.get(f'{band_root}.toneFrequencyOffsetMHz')
channel_offset = self.status.get(f'{band_root}.CryoChannels.centerFrequencyArray')
# Skip band if one of these fields is None
if None in [band_center, subband_offset, channel_offset]:
continue
subband_offset = np.array(ast.literal_eval(subband_offset))
channel_offset = np.array(ast.literal_eval(channel_offset))
self.freq_map[band] = band_center + subband_offset + channel_offset
# Calculates flux ramp reset rate (Pulled from pysmurf's code)
rtm_root = 'AMCc.FpgaTopLevel.AppTop.AppCore.RtmCryoDet'
ramp_max_cnt = self.status.get(f'{rtm_root}.RampMaxCnt')
if ramp_max_cnt is None:
self.flux_ramp_rate_hz = None
else:
digitizer_freq_mhz = float(self.status.get(
f'{band_roots[0]}.digitizerFrequencyMHz', 614.4))
ramp_max_cnt_rate_hz = 1.e6*digitizer_freq_mhz / 2.
self.flux_ramp_rate_hz = ramp_max_cnt_rate_hz / (ramp_max_cnt + 1)
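# Worked example: with the default 614.4 MHz digitizer frequency the
# counter rate is 307.2 MHz, so e.g. RampMaxCnt = 102399 yields a flux
# ramp reset rate of 307.2e6 / 102400 = 3 kHz.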
self._make_tags()
def _make_tags(self, delimiters=',|\\t| '):
"""Build list of tags from SMuRF status
"""
tags = self.status.get('AMCc.SmurfProcessor.SOStream.stream_tag')
if tags is None:
self.tags = []
return
self.tags = re.split(delimiters, tags)
if len(self.tags) == 1 and self.tags[0] == '':
self.tags = []
@classmethod
def from_file(cls, filename):
"""Generates a Smurf Status from a .g3 file.
Args
----
filename : str or list
"""
if isinstance(filename, str):
filenames = [filename]
else:
filenames = filename
status = {}
for file in filenames:
reader = so3g.G3IndexedReader(file)
while True:
frames = reader.Process(None)
if len(frames) == 0:
break
frame = frames[0]
if str(frame.type) == 'Wiring':
if status.get('start') is None:
status['start'] = frame['time'].time/spt3g_core.G3Units.s
status['stop'] = frame['time'].time/spt3g_core.G3Units.s
else:
status['stop'] = frame['time'].time/spt3g_core.G3Units.s
status.update(yaml.safe_load(frame['status']))
if frame['dump']:
break
return cls(status)
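## Usage sketch (the path is hypothetical):
## status = SmurfStatus.from_file('/data/timestreams/16320/crate1slot2/1632000000_000.g3')
## status.num_chans, status.tune, status.downsample_factor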
@classmethod
def from_time(cls, time, archive, stream_id=None, show_pb=False):
"""Generates a Smurf Status at specified unix timestamp.
Loads all status frames between session start frame and specified time.
Args
-------
time : (timestamp)
Time at which you want the rogue status
archive : (G3tSmurf instance)
The G3tSmurf archive to use to find the status
show_pb : (bool)
Turn on or off loading progress bar
stream_id : (string)
stream_id to look for status
Returns
--------
status : (SmurfStatus instance)
object indexing of rogue variables at specified time.
"""
time = archive._make_datetime(time)
session = archive.Session()
q = session.query(Frames).filter(
Frames.type_name == 'Observation',
Frames.time <= time
).order_by(Frames.time.desc())
if stream_id is not None:
q = q.join(Files).filter(Files.stream_id==stream_id)
else:
sids = archive._stream_ids_in_range(q[0].time, time)
if len(sids) > 1:
raise ValueError(
"Multiple stream_ids exist in the given range! "
"Must choose one to load SmurfStatus.\n"
f"stream_ids: {sids}"
)
if q.count()==0:
logger.error(f"No Frames found before time: {time}, stream_id: {stream_id}")
start_frame = q.first()
session_start = start_frame.time
if stream_id is None:
stream_id = start_frame.file.stream_id
status_frames = session.query(Frames).join(Files).filter(
Files.stream_id == stream_id,
Frames.type_name == 'Wiring',
Frames.time >= session_start,
Frames.time <= time
).order_by(Frames.time)
## Look for the last dump frame if available
dump_frame = status_frames.filter(
Frames.status_dump
).order_by(Frames.time.desc()).first()
if dump_frame is not None:
status_frames = [dump_frame]
else:
logger.info("Status dump frame not found, reading all status frames")
status_frames = status_frames.all()
status = {
'start':status_frames[0].time.timestamp(),
'stop':status_frames[-1].time.timestamp(),
}
cur_file = None
for frame_info in tqdm(status_frames, disable=(not show_pb)):
file = frame_info.file.name
if file != cur_file:
reader = so3g.G3IndexedReader(file)
cur_file = file
reader.Seek(frame_info.offset)
frame = reader.Process(None)[0]
status.update(yaml.safe_load(frame['status']))
session.close()
return cls(status)
def readout_to_smurf(self, rchan):
"""
Converts from a readout channel number to (band, channel).
Args
-----
rchan : int or List[int]
Readout channel to convert. If a list or array is passed,
this will return an array of bands and array of smurf channels.
Returns
--------
band, channel : (int, int) or (List[int], List[int])
The band, channel combination that has readout channel
``rchan``.
"""
abs_smurf_chan = self.mask[rchan]
return (abs_smurf_chan // self.CHANS_PER_BAND,
abs_smurf_chan % self.CHANS_PER_BAND)
def smurf_to_readout(self, band, chan):
"""
Converts from (band, channel) to a readout channel number.
If the channel is not streaming, returns -1.
Args:
band : int, List[int]
The band number, or list of band numbers corresponding to
channel input array.
chan : int, List[int]
Channel number or list of channel numbers.
"""
return self.mask_inv[band, chan]
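## Sanity check (sketch): mask and mask_inv are inverses for streaming
## channels, so for any streaming readout channel r,
## status.smurf_to_readout(*status.readout_to_smurf(r)) == r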
def get_channel_mask(ch_list, status, archive=None, obsfiledb=None,
ignore_missing=True):
"""Take a list of desired channels and parse them so the different
data loading functions can load them.
Args
------
ch_list : list
List of desired channels; the type of each list element is used
to determine what it is:
* int : absolute readout channel
* (int, int) : band, channel
* string : channel name (requires archive or obsfiledb)
* float : frequency in the smurf status (or should we use channel assignment?)
status : SmurfStatus instance
Status to use to generate channel loading mask
archive : G3tSmurf instance
Archive used to search for channel names / frequencies
obsfiledb : ObsFileDb instance
ObsFileDb used to search for channel names if archive is None
ignore_missing : bool
If true, will not raise errors if a requested channel is not found
Returns
-------
mask : bool array
Mask for the channels in the SmurfStatus
TODO: When loading from name, need to check tune file in use during file.
"""
if status.mask is None:
raise ValueError("Status Mask not set")
session = None
if archive is not None:
session = archive.Session()
msk = np.zeros( (status.num_chans,), dtype='bool')
for ch in ch_list:
if np.isscalar(ch):
if np.issubdtype( type(ch), np.integer):
#### this is an absolute readout channel
if not ignore_missing and ~np.any(status.mask == ch):
raise ValueError(f"channel {ch} not found")
msk[ status.mask == ch] = True
elif np.issubdtype( type(ch), np.floating):
#### this is a resonator frequency
b,c = np.where( np.isclose(status.freq_map, ch, rtol=1e-7) )
if len(b)==0:
if not ignore_missing:
raise ValueError(f"channel {ch} not found")
continue
elif status.mask_inv[b,c][0]==-1:
if not ignore_missing:
raise ValueError(f"channel {ch} not streaming")
continue
msk[status.mask_inv[b,c][0]] = True
elif np.issubdtype( type(ch), np.str_):
#### this is a channel name
if session is not None:
channel = session.query(Channels).filter(Channels.name==ch).one_or_none()
if channel is None:
if not ignore_missing:
raise ValueError(f"channel {ch} not found in G3tSmurf Archive")
continue
b,c = channel.band, channel.channel
elif obsfiledb is not None:
c = obsfiledb.conn.execute('select band,channel from channels where name=?',(ch,))
c = [(r[0],r[1]) for r in c]
if len(c) == 0:
if not ignore_missing:
raise ValueError(f"channel {ch} not found in obsfiledb")
continue
b,c = c[0]
else:
raise ValueError("Need G3tSmurf Archive or Obsfiledb to pass channel names")
idx = status.mask_inv[b,c]
if idx == -1:
if not ignore_missing:
raise ValueError(f"channel {ch} not streaming")
continue
msk[idx] = True
else:
raise TypeError(f"type {type(ch)} for channel {ch} not understood")
else:
if len(ch) == 2:
### this is a band, channel pair
idx = status.mask_inv[ch[0], ch[1]]
if idx == -1:
if not ignore_missing:
raise ValueError(f"channel {ch} not streaming")
continue
msk[idx] = True
else:
raise TypeError(f"type for channel {ch} not understood")
if session is not None:
session.close()
return msk
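## Usage sketch mixing the accepted specifier types (values hypothetical):
## msk = get_channel_mask([0, (2, 128), 5251.2], status)
## i.e. an absolute channel number, a (band, channel) pair, and a
## resonator frequency matching status.freq_map.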
def _get_tuneset_channel_names(status, ch_map, archive):
"""Update channel maps with name from Tuneset
"""
session = archive.Session()
## tune file in status
if status.tune is not None and len(status.tune) > 0:
tune_file = status.tune.split('/')[-1]
tune = session.query(Tunes).filter(Tunes.name == tune_file).one_or_none()
if tune is None :
logger.info(f"Tune file {tune_file} not found in G3tSmurf archive")
return ch_map
if tune.tuneset is None:
logger.info(f"Tune file {tune_file} has no TuneSet in G3tSmurf archive")
return ch_map
else:
logger.info("Tune information not in SmurfStatus, using most recent Tune")
tune = session.query(Tunes).filter(Tunes.start <= dt.datetime.utcfromtimestamp(status.start))
tune = tune.order_by(db.desc(Tunes.start)).first()
if tune is None:
logger.info("Most recent Tune does not exist")
return ch_map
if tune.tuneset is None:
logger.info(f"Tune file {tune.name} has no TuneSet in G3tSmurf archive")
return ch_map
bands, channels, names = zip(*[(ch.band, ch.channel, ch.name) for ch in tune.tuneset.channels])
ruids = []
for i in range(len(ch_map)):
try:
msk = np.all( [ch_map['band'][i]== bands, ch_map['channel'][i]==channels], axis=0)
j = np.where(msk)[0][0]
ruids.append( names[j] )
except:
logger.info(f"Information retrival error for Detector {ch_map[i]}")
ruids.append( 'sch_NONE_{}_{:03d}'.format(ch_map['band'][i],
ch_map['channel'][i]) )
continue
session.close()
return ruids
def _get_detset_channel_names(status, ch_map, obsfiledb):
"""Update channel maps with name from obsfiledb
"""
## tune file in status
if status.tune is not None and len(status.tune) > 0:
c = obsfiledb.conn.execute('select tuneset_id from tunes '
'where name=?', (status.tune,))
tuneset_id = [r[0] for r in c][0]
else:
logger.info("Tune information not in SmurfStatus, using most recent Tune")
c = obsfiledb.conn.execute('select tuneset_id from tunes '
'where start<=? '
'order by start desc', (dt.datetime.utcfromtimestamp(status.start),))
tuneset_id = [r[0] for r in c][0]
c = obsfiledb.conn.execute('select name from tunesets '
'where id=?', (tuneset_id,))
tuneset = [r[0] for r in c][0]
c = obsfiledb.conn.execute('select det from detsets '
'where name=?', (tuneset,))
detsets = [r[0] for r in c]
ruids = []
if len(detsets) == 0:
logger.warning("Found no detsets related to this observation, is the database incomplete?")
for i in range(len(ch_map)):
ruids.append( 'sch_NONE_{}_{:03d}'.format(ch_map['band'][i],
ch_map['channel'][i]) )
return ruids
sql="select band,channel,name from channels where name in ({seq})".format(
seq=','.join(['?']*len(detsets)))
c = obsfiledb.conn.execute(sql,detsets)
bands, channels, names = zip(*[(r[0],r[1],r[2]) for r in c])
for i in range(len(ch_map)):
try:
msk = np.all( [ch_map['band'][i]== bands, ch_map['channel'][i]==channels], axis=0)
j = np.where(msk)[0][0]
ruids.append( names[j] )
except:
logger.info(f"Information retrival error for Detector {ch_map[i]}")
ruids.append( 'sch_NONE_{}_{:03d}'.format(ch_map['band'][i],
ch_map['channel'][i]) )
continue
return ruids
def _get_channel_mapping(status, ch_map):
"""Generate baseline channel map from status object
"""
for i, ch in enumerate(ch_map['idx']):
try:
sch = status.readout_to_smurf( ch )
ch_map[i]['rchannel'] = 'r{:04d}'.format(ch)
ch_map[i]['freqs']= status.freq_map[sch[0], sch[1]]
ch_map[i]['band'] = sch[0]
ch_map[i]['channel'] = sch[1]
except:
ch_map[i]['rchannel'] = 'r{:04d}'.format(ch)
ch_map[i]['freqs']= -1
ch_map[i]['band'] = -1
ch_map[i]['channel'] = -1
return ch_map
def get_channel_info(status, mask=None, archive=None, obsfiledb=None,
det_axis='dets', short_labels=True):
"""Create the Channel Info Section of a G3tSmurf AxisManager
This function returns an AxisManager with the following properties::
* Axes:
* channels : resonator channels reading out
* Fields:
* band : Smurf Band
* channel : Smurf Channel
* frequency : resonator frequency
* rchannel : readout channel
* ruid : readout unique ID
Args
-----
status : SmurfStatus instance
mask : bool array
mask of which channels to use
archive : G3tSmurf instance (optional)
G3tSmurf instance for looking for tunes/tunesets
obsfiledb : ObsfileDb instance (optional)
ObsfileDb instance for det names / band / channel
short_labels : bool
Makes the labels used in the detector axis shorter/easier to read;
if False the labels will be the full readout unique IDs
Returns
--------
ch_info : AxisManager
"""
ch_list = np.arange( status.num_chans )
if mask is not None:
ch_list = ch_list[mask]
ch_map = np.zeros( len(ch_list), dtype = [('idx', int), ('rchannel', np.unicode_,30),
('band', int), ('channel', int),
('freqs', float)])
ch_map['idx'] = ch_list
ch_map = _get_channel_mapping(status, ch_map)
if archive is not None:
ruids = _get_tuneset_channel_names(status, ch_map, archive)
elif obsfiledb is not None:
ruids = _get_detset_channel_names(status, ch_map, obsfiledb)
else:
ruids = None
if short_labels or ruids is None:
if not short_labels:
logger.debug("Ignoring RUID request because not loading from database")
labels = ['sbch_{}_{:03d}'.format(ch_map['band'][i], ch_map['channel'][i]) for i in range(len(ch_list))]
ch_info = core.AxisManager( core.LabelAxis(det_axis, labels),)
else:
ch_info = core.AxisManager( core.LabelAxis(det_axis, ruids),)
ch_info.wrap('band', ch_map['band'], ([(0,det_axis)]) )
ch_info.wrap('channel', ch_map['channel'], ([(0,det_axis)]) )
ch_info.wrap('frequency', ch_map['freqs'], ([(0,det_axis)]) )
ch_info.wrap('rchannel', ch_map['rchannel'], ([(0,det_axis)]) )
if ruids is not None:
ch_info.wrap('ruid', np.array(ruids), ([(0,det_axis)]) )
return ch_info
def _get_timestamps(streams, load_type=None):
"""Calculate the timestamp field for loaded data
Args
-----
streams : dictionary
result from unpacking the desired data frames
load_type : None or int
if None, uses highest precision version possible. integer values
will use the TimingParadigm class for indexing
"""
if load_type is None:
## determine the desired loading type. Expand logic as
## data fields develop
if 'primary' in streams:
if 'UnixTime' in streams['primary']:
load_type = TimingParadigm.SmurfUnixTime
else:
load_type = TimingParadigm.G3Timestream
else:
load_type = TimingParadigm.G3Timestream
if load_type == TimingParadigm.SmurfUnixTime:
return io_load.hstack_into(None, streams['primary']['UnixTime'])/1e9
if load_type == TimingParadigm.G3Timestream:
return io_load.hstack_into(None, streams['time'])
logger.error("Timing System could not be determined")
def load_file(filename, channels=None, ignore_missing=True,
load_biases=True, load_primary=True, status=None,
archive=None, obsfiledb=None, show_pb=True, det_axis='dets',
short_labels=True):
"""Load data from file where there may or may not be a connected archive.
Args
----
filename : str or list
A filename or list of filenames (to be loaded in order).
Note that SmurfStatus is only loaded from the first file
channels: list or None
If not None, it should be a list that can be sent to get_channel_mask.
ignore_missing : bool
If true, will not raise errors if a requested channel is not found
load_biases : bool
If true, will load the bias lines for each detector
load_primary : bool
If true, loads the primary data fields, old .g3 files may not have
these fields.
archive : a G3tSmurf instance (optional)
obsfiledb : an ObsFileDb instance (optional, used when loading from context)
status : a SmurfStatus instance (optional); pass one if we don't want to
use the status from the first file
det_axis : name of the axis used for channels / detectors
short_labels : bool
Makes the labels used in the detector axis shorter/easier to read;
if False the labels will be the full readout unique IDs
Returns
---------
aman : AxisManager
AxisManager with the data with axes for `channels` and `samps`. It will
always have fields `timestamps`, `signal`, `flags`(FlagManager),
`ch_info` (AxisManager with `bands`, `channels`, `frequency`, etc).
"""
logger.debug(f"Axis Manager will have {det_axis} and samps axes")
if isinstance(filename, str):
filenames = [filename]
else:
filenames = filename
if len(filenames) == 0:
logger.error("No files provided to load")
if status is not None and status.num_chans is None:
logger.warning("Status information is missing 'num_chans.' Will try to fix.")
status = None
if status is None:
try:
logger.debug(f"Loading status from {filenames[0]}")
status = SmurfStatus.from_file(filenames[0])
except:
logger.warning(f"Failed to load status from {filenames[0]}.")
if status is None or status.num_chans is None:
try:
logger.warning(f"Complete status not available in {filenames[0]}\n"
"Trying to load status frame from the file at the start "
"of the corresponding observation.")
file_id = filenames[0].split('/')[-1][10:]
status_fp = filenames[0].replace(file_id, '_000.g3')
status = SmurfStatus.from_file(status_fp)
except Exception as e:
logger.error(f'Error when trying to load status from {status_fp}, maybe the file doesn\'t exist?'
'Please load the status manually.')
raise e
if channels is not None:
if len(channels) == 0:
logger.error("Requested empty list of channels. Use channels=None to "
"load all channels.")
ch_mask = get_channel_mask(channels, status, archive=archive,
obsfiledb=obsfiledb,
ignore_missing=ignore_missing)
else:
ch_mask = None
ch_info = get_channel_info(status, ch_mask, archive=archive,
obsfiledb=obsfiledb, det_axis=det_axis,
short_labels=short_labels)
subreq = [
io_load.FieldGroup('data', ch_info.rchannel, timestamp_field='time'),
]
if load_primary:
subreq.extend( [io_load.FieldGroup('primary', [io_load.Field('*', wildcard=True)])] )
if load_biases:
subreq.extend( [io_load.FieldGroup('tes_biases', [io_load.Field('*', wildcard=True)]),])
request = io_load.FieldGroup('root', subreq)
streams = None
try:
for filename in tqdm( filenames , total=len(filenames), disable=(not show_pb)):
streams = io_load.unpack_frames(filename, request, streams=streams)
except KeyError:
logger.error("Frames do not contain expected fields. Did Channel Mask change during the file?")
raise
count = sum(map(len,streams['time']))
## Build AxisManager
aman = core.AxisManager(
ch_info[det_axis].copy(),
core.OffsetAxis('samps', count, 0)
)
aman.wrap( 'timestamps', _get_timestamps(streams), ([(0,'samps')]))
# Conversion from DAC counts to squid phase
aman.wrap( 'signal', np.zeros(aman.shape, 'float32'),
[(0, det_axis), (1, 'samps')])
for idx in range(aman[det_axis].count):
io_load.hstack_into(aman.signal[idx], streams['data'][ch_info.rchannel[idx]])
rad_per_count = np.pi / 2**15
aman.signal *= rad_per_count
aman.wrap('ch_info', ch_info)
temp = core.AxisManager( aman.samps.copy() )
if load_primary:
for k in streams['primary'].keys():
temp.wrap(k, io_load.hstack_into(None, streams['primary'][k]), ([(0,'samps')]))
aman.wrap('primary', temp)
if load_biases:
bias_axis = core.LabelAxis('bias_lines', np.arange(len(streams['tes_biases'].keys())))
aman.wrap('biases', | np.zeros((bias_axis.count, aman.samps.count)) | numpy.zeros |
import numpy as np
"""
All of the rotation parameterizations here represent rotations
of an initial fixed frame to a new frame. Therefore using these
parameterizations to operate on a vector is the same as transforming
the new frame coordinates into the original fixed frame.
Example, if the rotation describes the relation of the body frame
w.r.t. the inertial frame (how to rotate inertial to body):
R: converts body frame coordinates into inertial frame coordinates
euler: usual roll-pitch-yaw of body frame w.r.t inertial frame
"""
def R_to_euler(R):
""" Converts the rotation matrix R to the Euler angles (ZYX)
(Yaw-Pitch-Roll) (psi, th, phi) used for aircraft conventions.
Note to compute the Euler angles for the aircraft this should be
the R matrix that converts a vector from body frame coordinates
to a vector in inertial frame coordinates.
"""
if np.sqrt(R[0,0]**2 + R[1,0]**2) >= .000001:
phi = np.arctan2(R[2,1], R[2,2])
th = np.arcsin(-R[2,0])
psi = np.arctan2(R[1,0], R[0,0])
else:
phi = np.arctan2(-R[1,2], R[1,1])
th = np.arcsin(-R[2,0])
psi = 0.0
return phi, th, psi
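# Consistency sketch: euler_to_R below returns the inertial-to-body matrix
# while R_to_euler expects body-to-inertial, so the round trip goes through
# a transpose (away from the th = +/- pi/2 singularity):
# phi, th, psi = 0.1, -0.4, 1.2
# np.allclose(R_to_euler(euler_to_R(phi, th, psi).T), (phi, th, psi)) # True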
def euler_to_R(phi,th,psi):
"""
Converts Euler angles (ZYX) (Yaw-Pitch-Roll) (psi, th, phi)
into the inertial to body rotation matrix """
return np.array([[np.cos(th)*np.cos(psi),
np.cos(th)*np.sin(psi),
-np.sin(th)],
[np.sin(phi)*np.sin(th)*np.cos(psi) - np.cos(phi)*np.sin(psi),
np.cos(phi)*np.cos(psi) + np.sin(phi)* | np.sin(th) | numpy.sin |
# author: <NAME>, <NAME>
# This file originated from tusv.py in TUSV by Jesse. <NAME> fixed bugs and extended it to the current model TUSV-est.
# # # # # # # # # # #
# I M P O R T S #
# # # # # # # # # # #
import copy
import sys # for command line arguments
import os # for manipulating files and folders
import argparse # for command line arguments
import random
import numpy as np
import multiprocessing as mp
from datetime import datetime
from graphviz import Digraph
from ete2 import Tree # for creating phylogenetic trees for .xml output
from Bio import Phylo # for creating phylogenies to export as phylo .xml files
from cStringIO import StringIO # for converting string to file (for creating initial phylo .xml)
sys.path.insert(0, 'model/')
sys.path.insert(0, 'help/')
import solver as sv
import file_manager as fm # sanitizes file and directory arguments
import generate_matrices as gm # gets F, Q, G, A, H from .vcf files
import printer as pt
import vcf_help as vh
import pickle
from snv_matching import snv_assign
# # # # # # # # # # # # #
# C O N S T A N T S #
# # # # # # # # # # # # #
MAX_NUM_LEAVES = 10
MAX_COPY_NUM = 20
MAX_CORD_DESC_ITERS = 1000
MAX_RESTART_ITERS = 1000
NUM_CORES = mp.cpu_count()
METADATA_FNAME = 'data/2017_09_18_metadata.vcf'
STR_DTYPE = 'S50'
# # # # # # # # # # # # #
# F U N C T I O N S #
# # # # # # # # # # # # #
def main(argv):
args = get_args(argv)
write_readme(args['output_directory'], args)
unmix(args['input_directory'], args['output_directory'], args['num_leaves'], args['c_max'], args['lambda1'], args['lambda2'], args['restart_iters'], args['cord_desc_iters'], args['processors'], args['time_limit'], args['metadata_file'], args['num_subsamples'], args['overide_lambdas'], args['constant'], args['sv_upperbound'], args['only_leaf'], args['collapse'], args['threshold'], args['multi_num_clones'])
# input: num_seg_subsamples (int or None) number of segments to include in deconvolution. these are
# in addition to any segments containing an SV, as those are mandatory for the SV. None means all segments
def unmix(in_dir, out_dir, n, c_max, lamb1, lamb2, num_restarts, num_cd_iters, num_processors, time_limit, metadata_fname, \
num_seg_subsamples, should_overide_lambdas, const, sv_ub, only_leaf, collapse, threshold, multi_num_clones=False):
print("unmix")
F_phasing_full, F_unsampled_phasing_full, Q_full, Q_unsampled_full, G, A, H, bp_attr, cv_attr, F_info_phasing, \
F_unsampled_info_phasing, sampled_snv_list_sort, unsampled_snv_list_sort, sampled_sv_list_sort, unsampled_sv_list_sort = gm.get_mats(in_dir, n, const=const, sv_ub=sv_ub)
Q_full, Q_unsampled_full, G, A, H, F_phasing_full, F_unsampled_phasing_full = check_valid_input(Q_full, Q_unsampled_full,G, A, H, F_phasing_full, F_unsampled_phasing_full)
np.savetxt(out_dir + "/F_info_phasing.csv", F_info_phasing, delimiter='\t', fmt='%s')
np.savetxt(out_dir + "/F_unsampled_info_phasing.csv", F_unsampled_info_phasing, delimiter='\t', fmt='%s')
np.savetxt(out_dir + "/sampled_snv_list_sort.csv", sampled_snv_list_sort, delimiter='\t', fmt='%d')
np.savetxt(out_dir + "/unsampled_snv_list_sort.csv", unsampled_snv_list_sort, delimiter='\t', fmt='%d')
np.savetxt(out_dir + "/sampled_sv_list_sort.csv", sampled_sv_list_sort, delimiter='\t', fmt='%d')
np.savetxt(out_dir + "/unsampled_sv_list_sort.csv", unsampled_sv_list_sort, delimiter='\t', fmt='%d')
F_phasing, Q, Q_unsampled, org_indxs = randomly_remove_segments(F_phasing_full, Q_full, Q_unsampled_full, num_seg_subsamples)
np.savetxt(out_dir + '/F_phasing.tsv', F_phasing, delimiter='\t', fmt='%.8f')
np.savetxt(out_dir + '/F_unsampled_phasing_full.tsv', F_unsampled_phasing_full, delimiter='\t', fmt='%.8f')
# replace lambda1 and lambda2 with input-derived values if should_overide_lambdas was specified
m = len(F_phasing)
l_g, r = Q.shape
g_un = Q_unsampled.shape[0]
print('The num of features of F is '+str(l_g)+ ', the num of copy numbers is ' +str(r)+ ', the num of unsampled SNV is ' + str(g_un)+ '.')
if should_overide_lambdas:
lamb1 = float(l_g + 2*r) / float(2*r) * float(m) / float(2 * (n-1) )/2
lamb2 = float(l_g + 2*r) / float(l_g)/2
Us, Cs, Es, As, obj_vals, Rs, Ws, W_SVs, W_SNVs = [], [], [], [], [], [], [], [], []
num_complete = 0
if not multi_num_clones:
for i in xrange(0, num_restarts):
U, C, E, A_, R, W, W_SV, W_SNV, obj_val, err_msg = sv.get_UCE(F_phasing, Q, G, A, H, n, c_max, lamb1, lamb2, num_cd_iters, time_limit, only_leaf)
printnow(str(i + 1) + ' of ' + str(num_restarts) + ' random restarts complete\n')
Us.append(U)
Cs.append(C)
Es.append(E)
As.append(A_)
Rs.append(R)
Ws.append(W)
W_SVs.append(W_SV)
W_SNVs.append(W_SNV)
obj_vals.append(obj_val)
best_i = 0
best_obj_val = obj_vals[best_i]
for i, obj_val in enumerate(obj_vals):
if obj_val < best_obj_val:
best_obj_val = obj_val
best_i = i
with open(out_dir + "/training_objective", 'w') as f:
f.write(str(best_obj_val))
E_pre = copy.deepcopy(Es[best_i])
R_pre = copy.deepcopy(Rs[best_i])
W_pre = copy.deepcopy(Ws[best_i])
if collapse:
U_best, C_best, E_best, A_best, R_best, W_best, W_SV_best, W_SNV_best = collapse_nodes(Us[best_i], Cs[best_i], Es[best_i], As[best_i], Rs[best_i], Ws[best_i], W_SVs[best_i], W_SNVs[best_i], threshold,only_leaf)
else:
U_best, C_best, E_best, A_best, R_best, W_best, W_SV_best, W_SNV_best = Us[best_i], Cs[best_i], Es[best_i], As[best_i], Rs[best_i], Ws[best_i], W_SVs[best_i], W_SNVs[best_i]
min_node, min_dist, W_unsampled = snv_assign(C_best[:, -2*r:], Q_unsampled, A_best, E_best, U_best, F_unsampled_phasing_full)
| np.savetxt(out_dir + "/unsampled_assignment.csv", min_node, delimiter=',') | numpy.savetxt |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import scipy.stats as stats
import h5py as h5
from tensorsignatures.config import *
from tensorsignatures.util import *
import subprocess
import os
class TensorSignatureData(object):
r"""Makes sample data for TensorSignatures
Args:
seed (:obj:`int`): Seed for signature instantiation
rank (:obj:`int`, :math:`2\leq s\leq 40`): The number of signatures
that shall be used to create the mutation counts.
samples (:obj:`int`, :math:`n\geq 1`): The number of samples in the
artificial dataset
size (:obj:`int`, :math:`\tau\geq 1`): size of counts.
mutations (:obj:`int`, :math:`\text{mutations}\geq 1`): Number of
mutations per genome.
dimensions (:obj:`list` of :obj:`int`): List indicating the size of
additional genomic dimensions.
verbose (:obj:`bool`): Verbose mode.
Returns:
A TensorSignaturesData object.
"""
def __init__(self,
seed,
rank,
samples=100,
size=50,
mutations=1000,
verbose=False,
dimensions=[2],
**kwargs):
self.seed = seed
np.random.seed(self.seed)
self.verbose = verbose
self.samples = samples
self.mutations = mutations
self.rank = rank
self.size = size
self.dim = dimensions
self.shape = [1 for j in enumerate(self.dim)]
self.idx = np.random.choice(
np.arange(40), replace=False, size=self.rank)
self.S0 = np.loadtxt(SIMULATION)[:, self.idx]
self.T0 = np.loadtxt(OTHER)[:, self.idx]
self.S1
self.B
self.A
self.M
self.K
self.S = self.S1 * self.B * self.A * self.M
for i, k in self.K.items():
self.S = self.S * k
self.T = self.T0 * (1 - self.M.reshape(-1, self.rank))
def __getitem__(self, item):
if not hasattr(self, '_var'):
self._var = {
'b0': self.b0,
'a0': self.a0,
'm': self.m,
'E': self.E,
**self._k}
return self._var[item]
def __add_noise(self, signatures, noise_strengh):
# Adds noise to mutation type probabilities.
p, r = signatures.shape
S = []
for r_i in range(r):
S_i = signatures[:, r_i] \
+ | np.random.uniform(-signatures[:, r_i], signatures[:, r_i]) | numpy.random.uniform |
'''
Basic classes for sections and surfaces, and fundamental functions
'''
import copy
import os
import re
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import CubicSpline
from scipy import spatial
from scipy.interpolate import interp1d
from scipy.spatial.distance import cdist
class BasicSection():
'''
Section: 3D curve and 2D unit curve
'''
def __init__(self, thick=None, chord=1.0, twist=0.0):
self.xLE = 0.0
self.yLE = 0.0
self.zLE = 0.0
self.chord = chord
self.twist = twist
self.thick = 0.0
self.thick_set = thick
#* 2D unit curve
self.xx = None
self.yy = None # open curve
self.yu = None # upper surface of closed curve
self.yl = None # lower surface of closed curve
#* 3D section
self.x = np.zeros(1)
self.y = np.zeros(1)
self.z = np.zeros(1)
def set_params(self, init=False, **kwargs):
'''
Set parameters of the section
### Inputs:
```text
init: True, set to default values
```
### kwargs:
```text
xLE, yLE, zLE, chord, twist, thick (None)
```
'''
if init:
self.xLE = 0.0
self.yLE = 0.0
self.zLE = 0.0
self.chord = 1.0
self.twist = 0.0
self.thick = 0.0
self.thick_set = None
return
if 'xLE' in kwargs.keys():
self.xLE = kwargs['xLE']
if 'yLE' in kwargs.keys():
self.yLE = kwargs['yLE']
if 'zLE' in kwargs.keys():
self.zLE = kwargs['zLE']
if 'chord' in kwargs.keys():
self.chord = kwargs['chord']
if 'twist' in kwargs.keys():
self.twist = kwargs['twist']
if 'thick' in kwargs.keys():
self.thick_set = kwargs['thick']
def section(self, nn=1001, flip_x=False, proj=True):
'''
### Functions:
```text
1. Construct 2D unit curve (null in the BasicSection)
2. Transform to 3D curve
```
### Inputs:
```text
nn: total amount of points (it's here for function BasicSurface.geo_secs)
flip_x: True ~ flip section.xx in reverse order
proj: True => for unit airfoil, the rotation keeps the projection length the same
```
'''
if not isinstance(self.xx, np.ndarray):
raise Exception('The 2D curve has not been constructed')
#* Flip xx
if flip_x:
self.xx = np.flip(self.xx)
#* Transform to 3D for open section
if isinstance(self.yy, np.ndarray):
self.x, _, self.y, _ = transform(self.xx, self.xx, self.yy, self.yy,
scale=self.chord, rot=self.twist, dx=self.xLE, dy=self.yLE, proj=proj)
self.z = np.ones_like(self.x)*self.zLE
#* Transform to 3D for closed section
if isinstance(self.yu, np.ndarray):
xu_, xl_, yu_, yl_ = transform(self.xx, self.xx, self.yu, self.yl,
scale=self.chord, rot=self.twist, dx=self.xLE, dy=self.yLE, proj=proj)
self.x = np.concatenate((np.flip(xl_),xu_[1:]), axis=0)
self.y = np.concatenate((np.flip(yl_),yu_[1:]), axis=0)
self.z = np.ones_like(self.x)*self.zLE
def copyfrom(self, other):
'''
Copy from another BasicSection object
'''
if not isinstance(other, BasicSection):
raise Exception('Must copy from another BasicSection object')
self.xLE = other.xLE
self.yLE = other.yLE
self.zLE = other.zLE
self.chord = other.chord
self.twist = other.twist
self.xx = copy.deepcopy(other.xx)
self.yy = copy.deepcopy(other.yy)
self.yu = copy.deepcopy(other.yu)
self.yl = copy.deepcopy(other.yl)
self.x = other.x.copy()
self.y = other.y.copy()
self.z = other.z.copy()
class BasicSurface():
'''
Construct multi-section surface with BasicSection objects.
>>> BasicSurface(n_sec=0, name='Surf', nn=1001, ns=101, project=True)
'''
def __init__(self, n_sec=0, name='Surf', nn=1001, ns=101, project=True):
n_ = max(1, n_sec)
self.l2d = n_ == 1 # type: bool
self.name = name # type: str
self.nn = nn # type: int
self.ns = ns # type: int
self.secs = [ BasicSection() for _ in range(n_) ]
self.surfs = [] # type: list[list]
self.project = project # type: bool
# Parameters for plot
self.half_s = 0.5 # type: float
self.center = np.array([0.5, 0.5, 0.5])
@property
def n_sec(self):
return len(self.secs)
@property
def zLE_secs(self):
'''
List of section zLE
'''
return [round(sec.zLE,5) for sec in self.secs]
def read_setting(self, fname: str):
'''
Read in Surface layout parameters from file
### Inputs:
```text
fname: control file name
```
'''
if not os.path.exists(fname):
raise Exception(fname+' does not exist for surface setting')
key_dict = {'Layout:': 1}
found_surf = False
found_key = 0
with open(fname, 'r') as f:
lines = f.readlines()
iL = 0
while iL<len(lines):
line = lines[iL].split()
if len(line) < 1:
iL += 1
continue
if not found_surf and len(line) > 1:
if '[Surf]' in line[0] and self.name == line[1]:
found_surf = True
elif found_surf and '[Surf]' in line[0]:
break
elif found_surf and found_key == 0:
if line[0] in key_dict:
found_key = key_dict[line[0]]
elif found_surf and found_key == 1:
for i in range(self.n_sec):
iL += 1
line = lines[iL].split()
self.secs[i].xLE = float(line[0])
self.secs[i].yLE = float(line[1])
self.secs[i].zLE = float(line[2])
self.secs[i].chord = float(line[3])
self.secs[i].twist = float(line[4])
if len(line) >= 6:
self.secs[i].thick_set = float(line[5])
if self.l2d:
self.secs[i].zLE = 0.0
found_key = 0
else:
# Lines that are not relevant
pass
iL += 1
self.layout_center()
def layout_center(self):
'''
Locate layout center for plot
'''
x_range = [self.secs[0].xLE, self.secs[0].xLE]
y_range = [self.secs[0].yLE, self.secs[0].yLE]
z_range = [self.secs[0].zLE, self.secs[0].zLE]
for i in range(self.n_sec):
x_range[0] = min(x_range[0], self.secs[i].xLE)
x_range[1] = max(x_range[1], self.secs[i].xLE+self.secs[i].chord)
y_range[0] = min(y_range[0], self.secs[i].yLE)
y_range[1] = max(y_range[1], self.secs[i].yLE)
z_range[0] = min(z_range[0], self.secs[i].zLE)
z_range[1] = max(z_range[1], self.secs[i].zLE)
span = np.array([x_range[1]-x_range[0], y_range[1]-y_range[0], z_range[1]-z_range[0]])
self.half_s = span.max()/2.0
self.center[0] = 0.5*(x_range[1]+x_range[0])
self.center[1] = 0.5*(y_range[1]+y_range[0])
self.center[2] = 0.5*(z_range[1]+z_range[0])
def copyfrom(self, other):
'''
Copy from another BasicSurface object
'''
if not isinstance(other, BasicSurface):
raise Exception('Must copy from a BasicSurface object')
self.l2d = other.l2d
self.name = other.name
self.nn = other.nn
self.ns = other.ns
self.secs = copy.deepcopy(other.secs)
self.surfs = copy.deepcopy(other.surfs)
self.half_s = other.half_s
self.center = other.center.copy()
def linear_interpolate_z(self, z: float, key='x'):
'''
Linear interpolation of key by given z
>>> key_value = linear_interpolate_z(z: float, key='x')
### Inputs:
```text
z: location of the value
key: The value to be interpolated
'x' or 'X'
'y' or 'Y'
'c' or 'C' or 'chord'
't' or 'thick' or 'thickness'
'twist'
```
'''
#* Find the adjacent control sections
i_sec = self.n_sec
for i in range(self.n_sec-1):
if (z-self.secs[i].zLE)*(z-self.secs[i+1].zLE)<0 or z==self.secs[i].zLE:
i_sec = i
if i_sec >= self.n_sec:
raise Exception('z is not within the surface: ', z, self.secs[0].zLE, self.secs[-1].zLE)
#* Linear interpolation
tt = (z-self.secs[i_sec].zLE)/(self.secs[i_sec+1].zLE-self.secs[i_sec].zLE)
key_value = None
if key == 'x' or key == 'X':
key_value = (1-tt)*self.secs[i_sec].xLE + tt*self.secs[i_sec+1].xLE
elif key == 'y' or key == 'Y':
key_value = (1-tt)*self.secs[i_sec].yLE + tt*self.secs[i_sec+1].yLE
elif key == 'c' or key == 'C' or key == 'chord':
key_value = (1-tt)*self.secs[i_sec].chord + tt*self.secs[i_sec+1].chord
elif key == 't' or key == 'thick' or key == 'thickness':
key_value = (1-tt)*self.secs[i_sec].thick + tt*self.secs[i_sec+1].thick
elif key == 'twist':
key_value = (1-tt)*self.secs[i_sec].twist + tt*self.secs[i_sec+1].twist
else:
raise Exception('Unknown key:', key)
return key_value
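## Usage sketch (hypothetical two-section surface): interpolate the chord
## at mid-span,
## z_mid = 0.5*(surf.secs[0].zLE + surf.secs[-1].zLE)
## c_mid = surf.linear_interpolate_z(z_mid, key='chord')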
def geo_secs(self, flip_x=False):
'''
Update surface sections
### Functions:
```text
1. Construct 2D unit curve (null in the BasicSection)
2. Transform to 3D curve
```
### Inputs:
```text
flip_x: True ~ flip section.xx in reverse order
```
'''
for i in range(self.n_sec):
self.secs[i].section(nn=self.nn, flip_x=flip_x, proj=self.project)
def geo(self, flip_x=False, update_sec=True):
'''
Generate surface geometry
### Inputs:
```text
flip_x: True ~ flip section.xx in reverse order
update_sec: True ~ update sections
```
'''
if update_sec:
self.geo_secs(flip_x=flip_x)
self.surfs = []
if self.l2d:
sec_ = copy.deepcopy(self.secs[0])
sec_.zLE = 1.0
surf = self.section_surf(self.secs[0], sec_, ns=self.ns)
self.surfs.append(surf)
else:
for i in range(self.n_sec-1):
surf = self.section_surf(self.secs[i], self.secs[i+1], ns=self.ns)
self.surfs.append(surf)
def geo_axisymmetric(self, phi, flip_x=False, update_sec=True):
'''
Generate axisymmetric surface geometry
### Inputs:
```text
phi: list or ndarray, position angle of control sections
flip_x: True ~ flip section.xx in reverse order
update_sec: True ~ update sections
```
'''
if update_sec:
self.geo_secs(flip_x=flip_x)
self.surfs = []
if self.l2d:
            raise Exception('Axisymmetric geometry cannot be a 2D surface')
else:
for i in range(self.n_sec-1):
surf = self.section_surf_axisymmetric(self.secs[i], self.secs[i+1], phi[i], phi[i+1], ns=self.ns)
self.surfs.append(surf)
@staticmethod
def section_surf(sec0, sec1, ns=101):
'''
        Interpolate a surface section between two section curves
>>> surf = section_surf(sec0, sec1, ns)
### Inputs:
```text
sec0, sec1: Section object
ns: number of spanwise points
```
### Return:
```text
surf: [surf_x, surf_y, surf_z]
list of ndarray [ns, nn]
```
'''
nn = sec0.x.shape[0]
surf_x = np.zeros((ns,nn))
surf_y = np.zeros((ns,nn))
surf_z = np.zeros((ns,nn))
for i in range(ns):
tt = 1.0*i/(ns-1.0)
surf_x[i,:] = (1-tt)*sec0.x + tt*sec1.x
surf_y[i,:] = (1-tt)*sec0.y + tt*sec1.y
surf_z[i,:] = (1-tt)*sec0.z + tt*sec1.z
surf = [surf_x, surf_y, surf_z]
return surf
@staticmethod
def section_surf_axisymmetric(sec0, sec1, phi0: float, phi1: float, ns=101):
'''
        Interpolate an axisymmetric surface section between two section curves
        >>> surf = section_surf_axisymmetric(sec0, sec1, phi0, phi1, ns)
### Inputs:
```text
sec0, sec1: Section object
phi0, phi1: angle (degree) about X-axis (X-Y plane is 0 degree)
ns: number of spanwise points
```
### Return:
```text
surf: [surf_x, surf_y, surf_z]
list of ndarray [ns, nn]
```
'''
nn = sec0.x.shape[0]
surf_x = np.zeros((ns,nn))
        surf_y = np.zeros((ns,nn))
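        surf_z = np.zeros((ns,nn))
        # NOTE: the remainder of this method was truncated in the source.
        # The loop below is an assumed reconstruction: sections are blended
        # linearly (as in section_surf) and rotated about the X-axis by an
        # angle interpolated between phi0 and phi1 (degrees, X-Y plane = 0).
        for i in range(ns):
            tt = 1.0*i/(ns-1.0)
            angle = ((1-tt)*phi0 + tt*phi1)/180.0*np.pi
            surf_x[i,:] = (1-tt)*sec0.x + tt*sec1.x
            rr = (1-tt)*sec0.y + tt*sec1.y
            surf_y[i,:] = rr*np.cos(angle)
            surf_z[i,:] = rr*np.sin(angle)
        surf = [surf_x, surf_y, surf_z]
        return surf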
from collections import defaultdict
import pytest
import numpy as np
import torch
from rl.policies import GaussianMLP
from rl.algos import PPO
class SampleTesterEnv:
def __init__(self, obs_dim, action_dim, done_state=10, gamma=0.99):
"""
        A simple environment that unit tests whether or not the
experience buffer and trajectory sampling code are
producing the correct output. This is to test for things
like off-by-one errors in the experience buffers or
reward-to-go calculations.
In other words:
Makes sure the "experience table" is of the form:
--------------------
s_0 | a_0 | r_0
--------------------
. . .
. . .
. . .
--------------------
s_T | a_T | r_T
--------------------
s_T+1 | |
--------------------
where entries are defined by the MDP transitions:
s_0 -> (s_1, a_0, r_0) -> ... -> (s_T+1, a_T, r_T)
"""
        self.observation_space = np.zeros(obs_dim)
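        # NOTE: the rest of __init__ was truncated in the source; the lines
        # below are an assumed completion based on the constructor signature
        # and the docstring (the `state` counter is hypothetical).
        self.action_space = np.zeros(action_dim)
        self.done_state = done_state
        self.gamma = gamma
        self.state = 0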
'''
Created on 21 Feb 2015
@author: <NAME> (<EMAIL>)
@copyright: (c) 2015 <NAME>
@license: MIT
'''
# standard library
from __future__ import division, print_function
import logging
# external libraries
import numpy as np
# local libraries
import spn
def equal_temperament(n):
"""Twelve-tone equal temperament (12TET) divides the octave into 12
    equal parts, making the interval between two adjacent notes the twelfth
root of two.
The argument n can be a number or a list/tuple/iterable.
2^(1/12):1 --> 2^(n/12)
https://en.wikipedia.org/wiki/Equal_temperament
"""
ratio = np.power(2, (np.array(n)/12))
return ratio
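# Usage example (illustrative, not part of the original source): a perfect
# fifth spans 7 semitones, giving a ratio of 2**(7/12):
#     equal_temperament(7)  # -> approx. 1.4983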
def piano_key2freq(n, a=49, tuning=440):
"""Twelve-tone equal temperament tuning for a theoretically ideal piano.
The argument n can be a number or a list/tuple/iterable.
    The 49th key, called A4, is tuned to the reference (tuning) frequency, normally
440Hz. The frequency is then given by
f(n) = 440*2^((n-49)/12)
https://en.wikipedia.org/wiki/Piano_key_frequencies
"""
frequency = tuning*equal_temperament(np.array(n)-a)
return frequency
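# Usage example (illustrative, not part of the original source): key 40 is
# middle C, so
#     piano_key2freq(40)  # -> 440*2**((40-49)/12), approx. 261.626 Hz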
def piano_freq2key(f, a=49, tuning=440, quantise=False):
"""Frequency [f] to twelve-tone equal temperament tuning for a theoretically
ideal piano, where 440Hz-->49
"""
    key = 12*np.log2(f/tuning)
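    # NOTE: the tail of this function was truncated in the source. Assumed
    # completion: offset by the reference key so that f == tuning maps to
    # key a (49 by default), optionally rounding to the nearest whole key.
    key = key + a
    if quantise:
        key = np.round(key).astype(int)
    return key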
import datetime
from abc import abstractmethod
from typing import Sequence
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from navicatGA.exceptions import NoFitnessFunction, InvalidInput
from navicatGA.exception_messages import exception_messages
from navicatGA.progress_bars import set_progress_bars
from navicatGA.cache import set_lru_cache
from navicatGA.helpers import get_elapsed_time
from navicatGA.logger import configure_logger, close_logger
allowed_selection_strategies = {
"roulette_wheel",
"two_by_two",
"random",
"tournament",
"boltzmann",
}
class GenAlgSolver:
def __init__(
self,
n_genes: int,
fitness_function=None,
assembler=None,
max_gen: int = 1000,
max_conv: int = 100,
pop_size: int = 100,
mutation_rate: float = 0.15,
selection_rate: float = 0.5,
selection_strategy: str = "roulette_wheel",
excluded_genes: Sequence = None,
n_crossover_points: int = 1,
random_state: int = None,
lru_cache: bool = False,
scalarizer=None,
prune_duplicates=False,
verbose: bool = True,
show_stats: bool = False,
plot_results: bool = False,
to_stdout: bool = True,
to_file: bool = True,
logger_file: str = "output.log",
logger_level: str = "INFO",
progress_bars: bool = False,
problem_type: str = "base",
):
"""Base solver class for the GA.
Has the core methods and attributes that any GA run will require.
        However, it lacks a way to create new offspring,
evaluate fitness, and generate mutations.
Those must be defined in a child class.
Parameters:
:param n_genes: number of genes (variables) to have in each chromosome
:type n_genes: int
:param fitness_function: a fitness function that takes assembler(chromosome) and returns one (or more) fitness scores
:type fitness_function: object
:param assembler: a function that takes a chromosome and returns an (ideally hashable) object to be evaluated
:type assembler: object
:param max_gen: maximum number of generations to perform the optimization
:type max_gen: int
:param max_conv: maximum number of generations with same max fitness until convergence is assumed
:type max_conv : int
:param pop_size: number of chromosomes in population
:type pop_size: int
:param mutation_rate: rate at which random mutations occur
:type mutation_rate: float
:param selection_rate: top percentage of the population to be selected for crossover
:type selection_rate: float
:param selection_strategy: strategy to use for selection, several available
:type selection_strategy: string
:param excluded_genes: indices of chromosomes that should not be changed during run
:type excluded_genes: optional, array-like
:param n_crossover_points: number of slices to make for the crossover
:type n_crossover_points: int
        :param random_state: fixes the random seed for the run
:type random_state: int, optional
        :param lru_cache: whether to use LRU caching, which is monkeypatched into the class. Requires that the fitness function is hashable.
:type lru_cache: bool
:param scalarizer: chimera scalarizer object initialized to work on the results of fitness function
:type scalarizer: optional, object with a scalarize method that takes in a population fitness and rescales it
:param prune_duplicates: whether to prune duplicates in each generation
:type prune_duplicates: bool
:param verbose: whether to print iterations status
:type verbose: int
:param show_stats: whether to print stats at the end
:type show_stats: bool
:param plot_results: whether to plot results of the run at the end
:type plot_results: bool
:param to_stdout: whether to write output to stdout
:type to_stdout: bool
:param to_file: whether to write output to file
:type to_file: bool
:param logger_file: name of the file where output will be written if to_file is True
:type logger_file: string
        :param progress_bars: whether to monkeypatch progress bars for monitoring the run
:type progress_bars: bool
:param problem_type: passing a simple flag from child class for some in built hashable fitness functions.
:type problem_type: string
"""
if isinstance(random_state, int):
np.random.seed(random_state)
self.logger = configure_logger(
logger_file=logger_file,
logger_level=logger_level,
to_stdout=to_stdout,
to_file=to_file,
)
self.generations_ = 0
self.best_individual_ = None
self.best_fitness_ = 0
self.best_pfitness_ = 0
self.population_ = None
self.fitness_ = None
self.printable_fitness = None
self.mean_fitness_ = None
self.max_fitness_ = None
self.n_genes = n_genes
self.allowed_mutation_genes = np.arange(self.n_genes)
self.assembler = assembler
self.check_input_base(
fitness_function, selection_strategy, pop_size, excluded_genes
)
self.scalarizer = scalarizer
self.selection_strategy = selection_strategy
self.max_gen = max_gen
self.max_conv = max_conv
self.pop_size = pop_size
self.mutation_rate = mutation_rate
self.selection_rate = selection_rate
self.n_crossover_points = n_crossover_points
self.verbose = verbose
self.show_stats = show_stats
self.plot_results = plot_results
self.pop_keep = int(np.floor(selection_rate * pop_size))
if self.pop_keep < 2:
self.pop_keep = 2
self.prob_intervals = self.get_selection_probabilities()
self.n_matings = int(np.floor((self.pop_size - self.pop_keep) / 2))
self.n_mutations = self.get_number_mutations()
self.runtime_ = 0.0
self.problem_type = problem_type
self.prune_duplicates = prune_duplicates
self.temperature = 100
if progress_bars:
self.logger.info("Setting up progress bars through monkeypatching.")
set_progress_bars(self)
if lru_cache:
self.logger.info("Setting up lru cache through monkeypatching.")
set_lru_cache(self)
def check_input_base(
self, fitness_function, selection_strategy, pop_size: int, excluded_genes
):
"""
Function to check that the main arguments have been passed to the GenAlgSolver instance.
Parameters:
:param fitness_function: a fitness function that takes a chromosome and returns a fitness
:param selection_strategy: a selection strategy string that can be recognized by this class
:param pop_size: the number of chromosomes
:param excluded_genes: a sequence of genes that should not change or mutate
"""
if not fitness_function:
try:
getattr(self, "fitness_function")
except AttributeError:
raise NoFitnessFunction(
"A fitness function must be defined or provided as an argument"
)
else:
self.fitness_function = fitness_function
if selection_strategy not in allowed_selection_strategies:
raise InvalidInput(
exception_messages["InvalidSelectionStrategy"](
selection_strategy, allowed_selection_strategies
)
)
if pop_size < 2:
raise (InvalidInput(exception_messages["InvalidPopulationSize"]))
if isinstance(excluded_genes, (list, tuple, np.ndarray)):
self.allowed_mutation_genes = [
item
for item in self.allowed_mutation_genes
if item not in excluded_genes
]
elif excluded_genes is not None:
raise InvalidInput(
exception_messages["InvalidExcludedGenes"](excluded_genes)
)
def solve(self, niter=None):
"""
Performs the genetic algorithm optimization according to the parameters
loaded in __init__. Will run for max_gen or until it
converges for max_conv iterations, or for min(niter,max_gen) iterations if niter
is an integer. Will start using previous state if available.
Parameters:
:param niter: the number of generations to run
"""
start_time = datetime.datetime.now()
if self.mean_fitness_ is None:
mean_fitness = np.ndarray(shape=(1, 0))
else:
self.logger.info("Continuing run with previous mean fitness in memory.")
mean_fitness = self.mean_fitness_
if self.max_fitness_ is None:
max_fitness = np.ndarray(shape=(1, 0))
else:
self.logger.info("Continuing run with previous max fitness in memory.")
max_fitness = self.max_fitness_
if self.population_ is None:
self.logger.info("Initializing population.")
population = self.initialize_population()
else:
self.logger.info("Continuing run with previous population in memory.")
population = self.population_
fitness, printable_fitness = self.calculate_fitness(population)
fitness, population, printable_fitness = self.sort_by_fitness(
fitness, population, printable_fitness
)
gen_interval = max(round(self.max_gen / 10), 1)
gen_n = 1
conv = 0
if isinstance(niter, int):
niter = min(self.max_gen, niter)
else:
niter = self.max_gen
for _ in range(niter):
gen_n += 1
self.generations_ += 1
mean_fitness = np.append(mean_fitness, fitness.mean())
max_fitness = np.append(max_fitness, fitness[0])
ma, pa = self.select_parents(fitness)
ix = np.arange(0, self.pop_size - self.pop_keep - 1, 2)
xp = np.array(
list(map(lambda _: self.get_crossover_points(), range(self.n_matings)))
)
for i in range(xp.shape[0]):
population[-1 - ix[i], :] = self.create_offspring(
population[ma[i], :], population[pa[i], :], xp[i], "first"
)
population[-1 - ix[i] - 1, :] = self.create_offspring(
population[pa[i], :], population[ma[i], :], xp[i], "second"
)
population = self.mutate_population(population, self.n_mutations)
if self.prune_duplicates:
pruned_pop = np.zeros(shape=(1, self.n_genes), dtype=object)
pruned_pop[0, :] = population[0, :]
self.logger.debug(
f"Pruned pop set as {pruned_pop} and population set as {population}"
)
for i in range(1, self.pop_size):
try:
if not list(population[i]) == list(pruned_pop[-1]):
pruned_pop = np.vstack((pruned_pop, population[i]))
except Exception as m:
self.logger.debug(
f"Population comparison for pruning failed: {m}"
)
nrefill = self.pop_size - pruned_pop.shape[0]
if nrefill > 0:
self.logger.debug(
f"Replacing a total of {nrefill} chromosomes due to duplications."
)
population = np.vstack(
(pruned_pop, self.refill_population(nrefill))
)
rest_fitness, rest_printable_fitness = self.calculate_fitness(
population[1:, :]
)
fitness = np.hstack((fitness[0], rest_fitness))
for i in range(1, len(rest_fitness)):
printable_fitness[i] = rest_printable_fitness[i]
fitness, population, printable_fitness = self.sort_by_fitness(
fitness, population, printable_fitness
)
self.best_individual_ = population[0, :]
if np.isclose(self.best_fitness_, fitness[0]):
conv += 1
self.best_fitness_ = fitness[0]
self.best_pfitness_ = printable_fitness[0]
if self.verbose:
self.logger.info("Generation: {0}".format(self.generations_))
self.logger.info("Best fitness result: {0}".format(self.best_pfitness_))
self.logger.trace("Best individual: {0}".format(population[0, :]))
self.logger.trace(
"Population at generation: {0}: {1}".format(
self.generations_, population
)
)
if gen_n >= niter or conv > self.max_conv:
break
self.population_ = population
self.fitness_ = fitness
self.printable_fitness = printable_fitness
self.mean_fitness_ = mean_fitness
self.max_fitness_ = max_fitness
if self.plot_results:
self.plot_fitness_results(
self.mean_fitness_, self.max_fitness_, self.generations_
)
end_time = datetime.datetime.now()
self.runtime_, time_str = get_elapsed_time(start_time, end_time)
if self.show_stats:
self.print_stats(time_str)
def calculate_fitness(self, population):
"""
Calculates the fitness of the population using the defined fitness function.
Parameters:
:param population: population (array of chromosomes)
Returns:
:return fitness: scalarized fitness of the current population, will be used
:return pfitness: not-scalarized fitness of the current population, for printing
"""
if self.scalarizer is None:
nvals = 1
fitness = np.zeros(shape=(population.shape[0], nvals), dtype=float)
for i in range(population.shape[0]):
fitness[i, :] = self.fitness_function(self.assembler(population[i]))
fitness = np.squeeze(fitness)
pfitness = fitness
else:
nvals = len(self.scalarizer.goals)
fitness = np.zeros(shape=(population.shape[0], nvals), dtype=float)
for i in range(population.shape[0]):
fitness[i, :] = self.fitness_function(self.assembler(population[i]))
pfitness = fitness
fitness = np.ones((population.shape[0])) - self.scalarizer.scalarize(
fitness
)
return fitness, pfitness
def select_parents(self, fitness):
"""
Selects the parents according to a given selection strategy.
Options are:
roulette_wheel: Selects individuals from mating pool giving
higher probabilities to fitter individuals.
two_by_two: Pairs fittest individuals two by two
        random: Selects individuals from mating pool randomly.
        tournament: Selects individuals by choosing groups of 3 candidate
        individuals and then selecting the fittest one from the 3.
        boltzmann: Selects individuals with probabilities drawn from a
        Boltzmann distribution over fitness, with increasing temperature.
Parameters:
:param fitness: the fitness values of the whole population at a given iteration
Returns:
:return (ma, pa): a tuple containing the selected 2 parents for each mating
"""
ma, pa = None, None
if (self.selection_strategy == "roulette_wheel") or (
self.selection_strategy == "random"
):
self.logger.trace(
f"Selection probabilities for kept population are {self.prob_intervals}."
)
ma = np.apply_along_axis(
self.interval_selection, 1, np.random.rand(self.n_matings, 1)
)
pa = np.apply_along_axis(
self.interval_selection, 1, np.random.rand(self.n_matings, 1)
)
elif self.selection_strategy == "boltzmann":
self.prob_intervals = self.get_boltzmann_probabilities(fitness)
self.logger.trace(
f"Selection probabilities for kept population are {self.prob_intervals}."
)
ma = np.apply_along_axis(
self.interval_selection, 1, np.random.rand(self.n_matings, 1)
)
pa = np.apply_along_axis(
self.interval_selection, 1, np.random.rand(self.n_matings, 1)
)
elif self.selection_strategy == "two_by_two":
range_max = self.n_matings * 2
ma = np.arange(range_max)[::2]
pa = np.arange(range_max)[1::2]
if ma.shape[0] > pa.shape[0]:
ma = ma[:-1]
elif self.selection_strategy == "tournament":
range_max = self.n_matings * 2
ma = self.tournament_selection(fitness, range_max)
pa = self.tournament_selection(fitness, range_max)
return ma, pa
def interval_selection(self, value):
"""
Select based on self.prob_intervals, which are given by the selection strategy.
Parameters:
:param value: random value defining which individual is selected from the probability intervals
Returns:
:return: the selected individual from the population
"""
return np.argmin(value > self.prob_intervals) - 1
def tournament_selection(self, fitness, range_max):
"""
Performs tournament selection.
Parameters:
:param fitness: the fitness values of the population at a given iteration
:param range_max: range of individuals that can be selected for the tournament
Returns:
:return: the selected individuals
"""
selected_individuals = np.random.choice(range_max, size=(self.n_matings, 3))
return np.array(
list(
map(
lambda x: self.tournament_selection_helper(x, fitness),
selected_individuals,
)
)
)
@staticmethod
def tournament_selection_helper(selected_individuals, fitness):
"""
Helper for tournament selection method. Selects the fittest individual
from a pool of candidate individuals.
"""
individuals_fitness = fitness[selected_individuals]
return selected_individuals[np.argmax(individuals_fitness)]
def get_selection_probabilities(self):
"""
Calculates selection probabilities either randomly or scaled by position.
"""
if self.selection_strategy == "roulette_wheel":
mating_prob = (
np.arange(1, self.pop_keep + 1) / np.arange(1, self.pop_keep + 1).sum()
)[::-1]
return np.array([0, *np.cumsum(mating_prob[: self.pop_keep + 1])])
elif self.selection_strategy == "random":
return np.linspace(0, 1, self.pop_keep + 1)
def get_boltzmann_probabilities(self, fitness):
"""
Calculates selection probabilities according to a fitness Boltzmann distribution with an increasing temperature.
"""
nfit = fitness[0 : self.pop_keep + 1]
self.logger.trace(f"Boltzmann initial preserved fitnesses: {nfit}")
sfit = 1 / ((nfit - nfit.min()) / (nfit.max() - nfit.min()) + 1e-6)
self.logger.trace(f"Boltzmann initial scaled fitnesses: {sfit}")
mating_prob = np.exp(-sfit * (1 / (self.temperature)))
self.logger.trace(f"Pre-normalized probabilities: {mating_prob}")
C = np.sum(mating_prob)
mating_prob = mating_prob * (1 / C)
self.logger.trace(f"Normalized probabilities: {mating_prob}")
self.temperature += 0.1 * self.temperature
self.logger.debug(f"Temperature increased to {self.temperature}.")
return np.array([0, *np.cumsum(mating_prob[: self.pop_keep])])
def get_number_mutations(self):
"""Returns the number of mutations that need to be performed."""
return int(np.ceil((self.pop_size - 1) * self.n_genes * self.mutation_rate))
@staticmethod
def sort_by_fitness(fitness, population, printable_fitness):
"""
Sorts fitness, population and printable fitness according to fitness.
"""
        sorted_fitness = np.argsort(fitness)
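        # NOTE: the rest of this method was truncated in the source. Assumed
        # completion: solve() treats index 0 as the fittest individual, so
        # reverse the ascending argsort before reordering all three inputs.
        order = sorted_fitness[::-1]
        fitness = fitness[order]
        population = population[order, :]
        printable_fitness = [printable_fitness[i] for i in order]
        return fitness, population, printable_fitness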
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from ..Tools.RotTrans import RotTrans
import os
from ..Pos.GetAberrationAngle import GetAberrationAngle
from .. import Globals
import RecarrayTools as RT
from . import MagGlobals
def _ReadMSM(Date,Minute=False,res=None,Ab=None,DetectGaps=None,Length=False):
'''
Reads binary magnetometer data from MESSENGER.
Args:
Date: 32-bit(minimum) integer with date in format yyyymmdd.
Minute: If True - routing will read minute averages of MAG data,
if False, then full resolution data will be read.
res: Set resample resolution in seconds for data, by default res=None -
no resampling, res=0.05 for evenly spaced 20Hz sampling.
        Ab: Angle to aberrate X and Y components of the data by, in degrees.
            When set to None, the aberration angle will be found automatically.
DetectGaps: Largest data gap size (in hours) to interpolate over,
if DetectGaps=None then all gaps will be interpolated over,
otherwise gaps will be filled with NaN.
Returns:
np.recarray of MAG data
'''
fname='{:08d}.bin'.format(Date)
path = MagGlobals.paths['MSM']
if Minute:
path += 'Minute/'
else:
path += 'Full/'
dtype = MagGlobals.dtypes['MSM']
if os.path.isfile(path+fname) == False:
if Length:
return 0
else:
            out = np.recarray(0,dtype=dtype)
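            # Assumed completion (truncated in the source): return the empty
            # record array for a missing file.
            return out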
"""Functions to clean images by fitting linear trends to the initial scans."""
try:
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
HAS_MPL = True
except ImportError:
HAS_MPL = False
from .fit import contiguous_regions
from .utils import jit, vectorize
from .histograms import histogram2d
import numpy as np
__all__ = ["fit_full_image", "display_intermediate"]
@vectorize('(float64(float64,float64,float64,float64))', nopython=True)
def _align_fast(x, scan, m, q):
"""Align ``scan`` to a linear function."""
return scan - x * m - q
XBUFFER = None
YBUFFER = None
def _get_coords(xedges, yedges):
"""Get coordinates given the edges of the histogram."""
global XBUFFER, YBUFFER
if XBUFFER is None:
xcenters = (xedges[:-1] + xedges[1:]) / 2
ycenters = (yedges[:-1] + yedges[1:]) / 2
X, Y = np.meshgrid(xcenters, ycenters)
XBUFFER = X
YBUFFER = Y
return XBUFFER, YBUFFER
EXPOMAP = None
def _calculate_image(x, y, counts, bx, by, nsamp):
"""Calculate the image."""
global EXPOMAP
if EXPOMAP is None:
EXPOMAP, xedges, yedges = histogram2d(x, y, bins=(bx, by),
weights=nsamp)
histograms, xedges, yedges = \
histogram2d(x, y, bins=(bx, by),
weights=[counts * nsamp, (counts) ** 2 * nsamp])
img, img_var = histograms
X, Y = _get_coords(xedges, yedges)
good = EXPOMAP > 0
mean = img.copy()
mean[good] /= EXPOMAP[good]
img_var[good] = img_var[good] / EXPOMAP[good] - mean[good] ** 2
return X, Y, mean.T, img_var.T
@jit # (nopython=True)
def _align_all(newd_t, newd_c, data_idx, par):
ms = np.zeros_like(newd_c, dtype=np.float64)
qs = np.zeros_like(newd_c, dtype=np.float64)
for i_p in range(0, len(par), 2):
i0, i1 = data_idx[i_p // 2]
if i0 == i1:
continue
sliceobj = slice(i0, i1)
ms[sliceobj] = par[i_p]
qs[sliceobj] = par[i_p + 1]
return _align_fast(newd_t, newd_c, ms, qs)
def counter(initial_value=0):
count = initial_value
while True:
yield count
count += 1
ITERATION_COUNT = counter(0)
CURR_CHANNEL = "Feed0_RCP"
def _save_intermediate(filename, par):
np.savetxt(filename, par)
def _get_saved_pars(filename):
return np.genfromtxt(filename)
def _save_iteration(par):
iteration = next(ITERATION_COUNT)
print(iteration, end="\r")
if iteration % 2 == 0:
_save_intermediate("out_iter_{}_{:03d}.txt".format(CURR_CHANNEL,
iteration), par)
def _obj_fun(par, data, data_idx, excluded, bx, by):
"""
This is the function we have to minimize.
Parameters
----------
par : array([m0, q0, m1, q1, ...])
linear baseline parameters for the image.
    data : [times, idxs, x, y, counts, expo]
        All six quantities are ``numpy`` ``array``s; ``time`` is time
        from the start of the scan; ``x``, ``y`` are the image coordinates,
        ``idx`` corresponds to the scan number, ``counts`` to the scan
        values at those coordinates and ``expo`` to the per-bin exposure
        (cf. ``_resample_scans``).
excluded : [[centerx0, centery0, radius0]]
list of circular regions to exclude from fitting (e.g. strong sources
that might alter the total rms)
"""
newd_t, _, newd_x, newd_y, newd_c, newd_e = data
newd_c_new = _align_all(newd_t, newd_c, data_idx, par)
X, Y, img, img_var = _calculate_image(newd_x, newd_y, newd_c_new, bx, by,
newd_e)
good = img != 0.
if excluded is not None:
for e in excluded:
centerx, centery, radius = e
filt = (X - centerx) ** 2 + (Y - centery) ** 2 < radius ** 2
good[filt] = 0
stat = np.sum(img_var[good]) + np.var(img[good]) * img[good].size
return stat
def _resample_scans(data):
"""Resample all scans to match the pixels of the image."""
t, idx, x, y, c = data
xmax, xmin = np.max(x), np.min(x)
ymax, ymin = np.max(y), np.min(y)
x_range = xmax - xmin
y_range = ymax - ymin
bx = np.linspace(xmin, xmax, int(x_range) + 1)
by = np.linspace(ymin, ymax, int(y_range) + 1)
newt = np.array([], dtype=np.float64)
newi = np.array([], dtype=int)
newx = np.array([], dtype=np.float64)
newy = np.array([], dtype=np.float64)
newc = np.array([], dtype=np.float64)
newe = np.array([], dtype=np.float64)
for i in list(set(idx)):
good = idx == i
x_filt = x[good]
n = len(x_filt)
if n == 0:
continue
y_filt = y[good]
c_filt = c[good]
t_filt = t[good]
t_filt -= t_filt[0]
hists, _, _ = \
histogram2d(x_filt, y_filt, bins=(bx, by),
weights=[np.ones(n), t_filt, x_filt, y_filt, c_filt])
expo, time, X, Y, counts = hists
good = expo > 0
goodexpo = expo[good]
tdum = np.ndarray.flatten(time[good] / goodexpo)
cdum = np.ndarray.flatten(counts[good] / goodexpo)
idum = np.ndarray.flatten(i + np.zeros(len(goodexpo), dtype=int))
xdum = np.ndarray.flatten(X[good] / goodexpo)
ydum = np.ndarray.flatten(Y[good] / goodexpo)
edum = np.ndarray.flatten(goodexpo)
newt = np.append(newt, tdum)
newc = np.append(newc, cdum)
newi = np.append(newi, idum)
newx = np.append(newx, xdum)
newy = np.append(newy, ydum)
newe = np.append(newe, edum)
return [newt, newi, newx, newy, newc, newe], bx, by
def _get_data_idx(par, idx):
"""Get the index in the data arrays corresponding to different scans."""
data_idx = []
par_pairs = list(zip(par[:-1:2], par[1::2]))
for i_p in range(len(par_pairs)):
good = idx == i_p
if not np.any(good):
data_idx.append([0, 0])
else:
data_idx.append(contiguous_regions(good)[0])
data_idx = np.array(data_idx, dtype=int)
return data_idx
def fit_full_image(scanset, chan="Feed0_RCP", feed=0, excluded=None, par=None):
"""Get a clean image by subtracting linear trends from the initial scans.
Parameters
----------
scanset : a :class:``ScanSet`` instance
The scanset to be fit
Other parameters
----------------
chan : str
channel of the scanset to be fit. Defaults to ``"Feed0_RCP"``
feed : int
feed of the scanset to be fit. Defaults to 0
excluded : [[centerx0, centery0, radius0]]
List of circular regions to exclude from fitting (e.g. strong sources
that might alter the total rms)
par : [m0, q0, m1, q1, ...] or None
Initial parameters -- slope and intercept for linear trends to be
subtracted from the scans
Returns
-------
new_counts : array-like
The new Counts column for scanset, where a baseline has been subtracted
from each scan to produce the cleanest image background.
"""
from scipy.optimize import minimize
global EXPOMAP, XBUFFER, YBUFFER, ITERATION_COUNT, CURR_CHANNEL
CURR_CHANNEL = chan
EXPOMAP = None
XBUFFER = None
YBUFFER = None
X = np.array(scanset['x'][:, feed], dtype=np.float64)
    Y = np.array(scanset['y'][:, feed], dtype=np.float64)
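    # NOTE: the remainder of this function was truncated in the source. Based
    # on the helpers above (_resample_scans, _get_data_idx, _obj_fun), the
    # assumed flow is roughly the following sketch; the scanset column names
    # ('time', 'Scan_id') and the exact minimize() options are hypothetical:
    #   counts = np.array(scanset[chan], dtype=np.float64)
    #   times = np.array(scanset['time'], dtype=np.float64)
    #   idxs = np.array(scanset['Scan_id'], dtype=int)
    #   data, bx, by = _resample_scans([times, idxs, X, Y, counts])
    #   data_idx = _get_data_idx(par, data[1])
    #   res = minimize(_obj_fun, par, args=(data, data_idx, excluded, bx, by),
    #                  callback=_save_iteration)
    #   ...then re-align the full-resolution counts with _align_all() and
    #   return them as the new Counts column.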
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import warnings
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.augmenters import contrast as contrast_lib
from imgaug.testutils import ArgCopyingMagicMock, keypoints_equal, reseed
class TestGammaContrast(unittest.TestCase):
def setUp(self):
reseed()
def test___init___tuple_to_uniform(self):
aug = iaa.GammaContrast((1, 2))
assert isinstance(aug.params1d[0], iap.Uniform)
assert isinstance(aug.params1d[0].a, iap.Deterministic)
assert isinstance(aug.params1d[0].b, iap.Deterministic)
assert aug.params1d[0].a.value == 1
assert aug.params1d[0].b.value == 2
def test___init___list_to_choice(self):
aug = iaa.GammaContrast([1, 2])
assert isinstance(aug.params1d[0], iap.Choice)
assert np.all([val in aug.params1d[0].a for val in [1, 2]])
def test_images_basic_functionality(self):
img = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
img = np.uint8(img)
img3d = np.tile(img[:, :, np.newaxis], (1, 1, 3))
# check basic functionality with gamma=1 or 2 (deterministic) and
        # per_channel on/off (makes no difference due to deterministic gamma)
for per_channel in [False, 0, 0.0, True, 1, 1.0]:
for gamma in [1, 2]:
aug = iaa.GammaContrast(
gamma=iap.Deterministic(gamma),
per_channel=per_channel)
img_aug = aug.augment_image(img)
img3d_aug = aug.augment_image(img3d)
assert img_aug.dtype.name == "uint8"
assert img3d_aug.dtype.name == "uint8"
assert np.array_equal(
img_aug,
skimage.exposure.adjust_gamma(img, gamma=gamma))
assert np.array_equal(
img3d_aug,
skimage.exposure.adjust_gamma(img3d, gamma=gamma))
def test_per_channel_is_float(self):
# check that per_channel at 50% prob works
aug = iaa.GammaContrast((0.5, 2.0), per_channel=0.5)
seen = [False, False]
img1000d = np.zeros((1, 1, 1000), dtype=np.uint8) + 128
for _ in sm.xrange(100):
img_aug = aug.augment_image(img1000d)
assert img_aug.dtype.name == "uint8"
nb_values_uq = len(set(img_aug.flatten().tolist()))
if nb_values_uq == 1:
seen[0] = True
else:
seen[1] = True
if np.all(seen):
break
assert np.all(seen)
def test_keypoints_not_changed(self):
aug = iaa.GammaContrast(gamma=2)
kpsoi = ia.KeypointsOnImage([ia.Keypoint(1, 1)], shape=(3, 3, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], kpsoi_aug)
def test_heatmaps_not_changed(self):
aug = iaa.GammaContrast(gamma=2)
heatmaps_arr = np.zeros((3, 3, 1), dtype=np.float32) + 0.5
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]
assert np.allclose(heatmaps.arr_0to1, heatmaps_aug.arr_0to1)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 128, dtype=np.uint8)
aug = iaa.GammaContrast(0.5)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 128, dtype=np.uint8)
aug = iaa.GammaContrast(0.5)
image_aug = aug(image=image)
assert np.any(image_aug != 128)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_other_dtypes_uint_int(self):
dts = [np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64]
for dtype in dts:
dtype = np.dtype(dtype)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
exps = [1, 2, 3]
values = [0, 100, int(center_value + 0.1*max_value)]
            tolerances = [0, 0,
                          1e-8 * max_value if dtype in [np.uint64, np.int64] else 0]
for exp in exps:
aug = iaa.GammaContrast(exp)
for value, tolerance in zip(values, tolerances):
with self.subTest(dtype=dtype.name, exp=exp,
nb_channels=None):
image = np.full((3, 3), value, dtype=dtype)
expected = (
(
(image.astype(np.float128) / max_value)
** exp
) * max_value
).astype(dtype)
image_aug = aug.augment_image(image)
value_aug = int(image_aug[0, 0])
value_expected = int(expected[0, 0])
diff = abs(value_aug - value_expected)
assert image_aug.dtype.name == dtype.name
assert len(np.unique(image_aug)) == 1
assert diff <= tolerance
# test other channel numbers
for nb_channels in [1, 2, 3, 4, 5, 7, 11]:
with self.subTest(dtype=dtype.name, exp=exp,
nb_channels=nb_channels):
image = np.full((3, 3), value, dtype=dtype)
image = np.tile(image[..., np.newaxis],
(1, 1, nb_channels))
for c in sm.xrange(nb_channels):
image[..., c] += c
expected = (
(
(image.astype(np.float128) / max_value)
** exp
) * max_value
).astype(dtype)
image_aug = aug.augment_image(image)
assert image_aug.shape == (3, 3, nb_channels)
assert image_aug.dtype.name == dtype.name
# can be less than nb_channels when multiple input
# values map to the same output value
# mapping distribution can behave exponential with
# slow start and fast growth at the end
assert len(np.unique(image_aug)) <= nb_channels
for c in sm.xrange(nb_channels):
value_aug = int(image_aug[0, 0, c])
value_expected = int(expected[0, 0, c])
diff = abs(value_aug - value_expected)
assert diff <= tolerance
def test_other_dtypes_float(self):
dts = [np.float16, np.float32, np.float64]
for dtype in dts:
dtype = np.dtype(dtype)
def _allclose(a, b):
atol = 1e-3 if dtype == np.float16 else 1e-8
return np.allclose(a, b, atol=atol, rtol=0)
exps = [1, 2]
isize = np.dtype(dtype).itemsize
values = [0, 1.0, 50.0, 100 ** (isize - 1)]
for exp in exps:
aug = iaa.GammaContrast(exp)
for value in values:
with self.subTest(dtype=dtype.name, exp=exp,
nb_channels=None):
image = np.full((3, 3), value, dtype=dtype)
expected = (
image.astype(np.float128)
** exp
).astype(dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype == np.dtype(dtype)
assert _allclose(image_aug, expected)
# test other channel numbers
for nb_channels in [1, 2, 3, 4, 5, 7, 11]:
with self.subTest(dtype=dtype.name, exp=exp,
nb_channels=nb_channels):
image = np.full((3, 3), value, dtype=dtype)
image = np.tile(image[..., np.newaxis],
(1, 1, nb_channels))
for c in sm.xrange(nb_channels):
image[..., c] += float(c)
expected = (
image.astype(np.float128)
** exp
).astype(dtype)
image_aug = aug.augment_image(image)
assert image_aug.shape == (3, 3, nb_channels)
assert image_aug.dtype.name == dtype.name
for c in sm.xrange(nb_channels):
value_aug = image_aug[0, 0, c]
value_expected = expected[0, 0, c]
assert _allclose(value_aug, value_expected)
class TestSigmoidContrast(unittest.TestCase):
def setUp(self):
reseed()
def test___init___tuple_to_uniform(self):
# check that tuple to uniform works
# note that gain and cutoff are saved in inverted order in
# _ContrastFuncWrapper to match the order of skimage's function
aug = iaa.SigmoidContrast(gain=(1, 2), cutoff=(0.25, 0.75))
assert isinstance(aug.params1d[0], iap.Uniform)
assert isinstance(aug.params1d[0].a, iap.Deterministic)
assert isinstance(aug.params1d[0].b, iap.Deterministic)
assert aug.params1d[0].a.value == 1
assert aug.params1d[0].b.value == 2
assert isinstance(aug.params1d[1], iap.Uniform)
assert isinstance(aug.params1d[1].a, iap.Deterministic)
assert isinstance(aug.params1d[1].b, iap.Deterministic)
assert np.allclose(aug.params1d[1].a.value, 0.25)
assert np.allclose(aug.params1d[1].b.value, 0.75)
def test___init___list_to_choice(self):
# check that list to choice works
# note that gain and cutoff are saved in inverted order in
# _ContrastFuncWrapper to match the order of skimage's function
aug = iaa.SigmoidContrast(gain=[1, 2], cutoff=[0.25, 0.75])
assert isinstance(aug.params1d[0], iap.Choice)
assert np.all([val in aug.params1d[0].a for val in [1, 2]])
assert isinstance(aug.params1d[1], iap.Choice)
assert np.all([
np.allclose(val, val_choice)
for val, val_choice
in zip([0.25, 0.75], aug.params1d[1].a)])
def test_images_basic_functionality(self):
img = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
img = np.uint8(img)
img3d = np.tile(img[:, :, np.newaxis], (1, 1, 3))
        # check basic functionality with per_channel on/off (makes no
# difference due to deterministic parameters)
for per_channel in [False, 0, 0.0, True, 1, 1.0]:
for gain, cutoff in itertools.product([5, 10], [0.25, 0.75]):
with self.subTest(gain=gain, cutoff=cutoff,
per_channel=per_channel):
aug = iaa.SigmoidContrast(
gain=iap.Deterministic(gain),
cutoff=iap.Deterministic(cutoff),
per_channel=per_channel)
img_aug = aug.augment_image(img)
img3d_aug = aug.augment_image(img3d)
assert img_aug.dtype.name == "uint8"
assert img3d_aug.dtype.name == "uint8"
assert np.array_equal(
img_aug,
skimage.exposure.adjust_sigmoid(
img, gain=gain, cutoff=cutoff))
assert np.array_equal(
img3d_aug,
skimage.exposure.adjust_sigmoid(
img3d, gain=gain, cutoff=cutoff))
def test_per_channel_is_float(self):
# check that per_channel at 50% prob works
aug = iaa.SigmoidContrast(gain=(1, 10),
cutoff=(0.25, 0.75),
per_channel=0.5)
seen = [False, False]
img1000d = np.zeros((1, 1, 1000), dtype=np.uint8) + 128
for _ in sm.xrange(100):
img_aug = aug.augment_image(img1000d)
assert img_aug.dtype.name == "uint8"
nb_values_uq = len(set(img_aug.flatten().tolist()))
if nb_values_uq == 1:
seen[0] = True
else:
seen[1] = True
if np.all(seen):
break
assert np.all(seen)
def test_keypoints_dont_change(self):
aug = iaa.SigmoidContrast(gain=10, cutoff=0.5)
kpsoi = ia.KeypointsOnImage([ia.Keypoint(1, 1)], shape=(3, 3, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], kpsoi_aug)
def test_heatmaps_dont_change(self):
aug = iaa.SigmoidContrast(gain=10, cutoff=0.5)
heatmaps_arr = np.zeros((3, 3, 1), dtype=np.float32) + 0.5
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]
assert np.allclose(heatmaps.arr_0to1, heatmaps_aug.arr_0to1)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 128, dtype=np.uint8)
aug = iaa.SigmoidContrast(gain=10, cutoff=0.5)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 128, dtype=np.uint8)
aug = iaa.SigmoidContrast(gain=10, cutoff=1.0)
image_aug = aug(image=image)
assert np.any(image_aug != 128)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_other_dtypes_uint_int(self):
dtypes = [np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64]
for dtype in dtypes:
dtype = np.dtype(dtype)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
gains = [5, 20]
cutoffs = [0.25, 0.75]
values = [0, 100, int(center_value + 0.1 * max_value)]
tmax = 1e-8 * max_value if dtype in [np.uint64, np.int64] else 0
tolerances = [tmax, tmax, tmax]
for gain, cutoff in itertools.product(gains, cutoffs):
with self.subTest(dtype=dtype.name, gain=gain, cutoff=cutoff):
aug = iaa.SigmoidContrast(gain=gain, cutoff=cutoff)
for value, tolerance in zip(values, tolerances):
image = np.full((3, 3), value, dtype=dtype)
# TODO this looks like the equation commented out
# should actually the correct one, but when using
# it we get a difference between expectation and
# skimage ground truth
# 1/(1 + exp(gain*(cutoff - I_ij/max)))
expected = (
1
/ (
1
+ np.exp(
gain
* (
cutoff
- image.astype(np.float128)/max_value
)
)
)
)
expected = (expected * max_value).astype(dtype)
# expected = (
# 1/(1 + np.exp(gain * (
# cutoff - (
# image.astype(np.float128)-min_value
# )/dynamic_range
# ))))
# expected = (
# min_value + expected * dynamic_range).astype(dtype)
image_aug = aug.augment_image(image)
value_aug = int(image_aug[0, 0])
value_expected = int(expected[0, 0])
diff = abs(value_aug - value_expected)
assert image_aug.dtype.name == dtype.name
assert len(np.unique(image_aug)) == 1
assert diff <= tolerance
def test_other_dtypes_float(self):
dtypes = [np.float16, np.float32, np.float64]
for dtype in dtypes:
dtype = np.dtype(dtype)
def _allclose(a, b):
atol = 1e-3 if dtype == np.float16 else 1e-8
return np.allclose(a, b, atol=atol, rtol=0)
gains = [5, 20]
cutoffs = [0.25, 0.75]
isize = np.dtype(dtype).itemsize
values = [0, 1.0, 50.0, 100 ** (isize - 1)]
for gain, cutoff in itertools.product(gains, cutoffs):
with self.subTest(dtype=dtype, gain=gain, cutoff=cutoff):
aug = iaa.SigmoidContrast(gain=gain, cutoff=cutoff)
for value in values:
image = np.full((3, 3), value, dtype=dtype)
expected = (
1
/ (
1
+ np.exp(
gain
* (
cutoff
- image.astype(np.float128)
)
)
)
).astype(dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype.name
assert _allclose(image_aug, expected)
class TestLogContrast(unittest.TestCase):
def setUp(self):
reseed()
def test_images_basic_functionality(self):
img = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
img = np.uint8(img)
img3d = np.tile(img[:, :, np.newaxis], (1, 1, 3))
# check basic functionality with gain=1 or 2 (deterministic) and
# per_channel on/off (makes no difference due to deterministic gain)
for per_channel in [False, 0, 0.0, True, 1, 1.0]:
for gain in [1, 2]:
with self.subTest(gain=gain, per_channel=per_channel):
aug = iaa.LogContrast(
gain=iap.Deterministic(gain),
per_channel=per_channel)
img_aug = aug.augment_image(img)
img3d_aug = aug.augment_image(img3d)
assert img_aug.dtype.name == "uint8"
assert img3d_aug.dtype.name == "uint8"
assert np.array_equal(
img_aug,
skimage.exposure.adjust_log(img, gain=gain))
assert np.array_equal(
img3d_aug,
skimage.exposure.adjust_log(img3d, gain=gain))
def test___init___tuple_to_uniform(self):
aug = iaa.LogContrast((1, 2))
assert isinstance(aug.params1d[0], iap.Uniform)
assert isinstance(aug.params1d[0].a, iap.Deterministic)
assert isinstance(aug.params1d[0].b, iap.Deterministic)
assert aug.params1d[0].a.value == 1
assert aug.params1d[0].b.value == 2
def test___init___list_to_choice(self):
aug = iaa.LogContrast([1, 2])
assert isinstance(aug.params1d[0], iap.Choice)
assert np.all([val in aug.params1d[0].a for val in [1, 2]])
def test_per_channel_is_float(self):
# check that per_channel at 50% prob works
aug = iaa.LogContrast((0.5, 2.0), per_channel=0.5)
seen = [False, False]
img1000d = np.zeros((1, 1, 1000), dtype=np.uint8) + 128
for _ in sm.xrange(100):
img_aug = aug.augment_image(img1000d)
assert img_aug.dtype.name == "uint8"
nb_values_uq = len(set(img_aug.flatten().tolist()))
if nb_values_uq == 1:
seen[0] = True
else:
seen[1] = True
if np.all(seen):
break
assert np.all(seen)
def test_keypoints_not_changed(self):
aug = iaa.LogContrast(gain=2)
kpsoi = ia.KeypointsOnImage([ia.Keypoint(1, 1)], shape=(3, 3, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], kpsoi_aug)
def test_heatmaps_not_changed(self):
aug = iaa.LogContrast(gain=2)
heatmap_arr = np.zeros((3, 3, 1), dtype=np.float32) + 0.5
heatmaps = ia.HeatmapsOnImage(heatmap_arr, shape=(3, 3, 3))
heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]
assert np.allclose(heatmaps.arr_0to1, heatmaps_aug.arr_0to1)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 128, dtype=np.uint8)
aug = iaa.LogContrast(gain=2)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 128, dtype=np.uint8)
aug = iaa.LogContrast(gain=2)
image_aug = aug(image=image)
assert np.any(image_aug != 128)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_other_dtypes_uint_int(self):
# support before 1.17:
# [np.uint8, np.uint16, np.uint32, np.uint64,
# np.int8, np.int16, np.int32, np.int64]
# support beginning with 1.17:
# [np.uint8, np.uint16,
# np.int8, np.int16]
# uint, int
dtypes = [np.uint8, np.uint16, np.int8, np.int16]
for dtype in dtypes:
dtype = np.dtype(dtype)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
gains = [0.5, 0.75, 1.0, 1.1]
values = [0, 100, int(center_value + 0.1 * max_value)]
tmax = 1e-8 * max_value if dtype in [np.uint64, np.int64] else 0
tolerances = [0, tmax, tmax]
for gain in gains:
aug = iaa.LogContrast(gain)
for value, tolerance in zip(values, tolerances):
with self.subTest(dtype=dtype.name, gain=gain):
image = np.full((3, 3), value, dtype=dtype)
expected = (
gain
* np.log2(
1 + (image.astype(np.float128)/max_value)
)
)
expected = (expected*max_value).astype(dtype)
image_aug = aug.augment_image(image)
value_aug = int(image_aug[0, 0])
value_expected = int(expected[0, 0])
diff = abs(value_aug - value_expected)
assert image_aug.dtype.name == dtype.name
assert len(np.unique(image_aug)) == 1
assert diff <= tolerance
def test_other_dtypes_float(self):
dtypes = [np.float16, np.float32, np.float64]
for dtype in dtypes:
dtype = np.dtype(dtype)
def _allclose(a, b):
# since numpy 1.17 this needs for some reason at least 1e-5 as
# the tolerance, previously 1e-8 worked
atol = 1e-2 if dtype == np.float16 else 1e-5
return np.allclose(a, b, atol=atol, rtol=0)
gains = [0.5, 0.75, 1.0, 1.1]
isize = np.dtype(dtype).itemsize
values = [0, 1.0, 50.0, 100 ** (isize - 1)]
for gain in gains:
aug = iaa.LogContrast(gain)
for value in values:
with self.subTest(dtype=dtype.name, gain=gain):
image = np.full((3, 3), value, dtype=dtype)
expected = (
gain
* np.log2(
1 + image.astype(np.float128)
)
)
expected = expected.astype(dtype)
image_aug = aug.augment_image(image)
                        assert image_aug.dtype.name == dtype.name
assert _allclose(image_aug, expected)
class TestLinearContrast(unittest.TestCase):
def setUp(self):
reseed()
def test_images_basic_functionality(self):
img = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
img = np.uint8(img)
img3d = np.tile(img[:, :, np.newaxis], (1, 1, 3))
# check basic functionality with alpha=1 or 2 (deterministic) and
# per_channel on/off (makes no difference due to deterministic alpha)
for per_channel in [False, 0, 0.0, True, 1, 1.0]:
for alpha in [1, 2]:
with self.subTest(alpha=alpha, per_channel=per_channel):
aug = iaa.LinearContrast(
alpha=iap.Deterministic(alpha),
per_channel=per_channel)
img_aug = aug.augment_image(img)
img3d_aug = aug.augment_image(img3d)
assert img_aug.dtype.name == "uint8"
assert img3d_aug.dtype.name == "uint8"
assert np.array_equal(
img_aug,
contrast_lib.adjust_contrast_linear(img, alpha=alpha))
assert np.array_equal(
img3d_aug,
contrast_lib.adjust_contrast_linear(img3d, alpha=alpha))
def test___init___tuple_to_uniform(self):
aug = iaa.LinearContrast((1, 2))
assert isinstance(aug.params1d[0], iap.Uniform)
assert isinstance(aug.params1d[0].a, iap.Deterministic)
assert isinstance(aug.params1d[0].b, iap.Deterministic)
assert aug.params1d[0].a.value == 1
assert aug.params1d[0].b.value == 2
def test___init___list_to_choice(self):
aug = iaa.LinearContrast([1, 2])
assert isinstance(aug.params1d[0], iap.Choice)
assert np.all([val in aug.params1d[0].a for val in [1, 2]])
def test_float_as_per_channel(self):
# check that per_channel at 50% prob works
aug = iaa.LinearContrast((0.5, 2.0), per_channel=0.5)
seen = [False, False]
# must not use just value 128 here, otherwise nothing will change as
# all values would have distance 0 to 128
img1000d = np.zeros((1, 1, 1000), dtype=np.uint8) + 128 + 20
for _ in sm.xrange(100):
img_aug = aug.augment_image(img1000d)
assert img_aug.dtype.name == "uint8"
nb_values_uq = len(set(img_aug.flatten().tolist()))
if nb_values_uq == 1:
seen[0] = True
else:
seen[1] = True
if np.all(seen):
break
assert np.all(seen)
def test_keypoints_not_changed(self):
aug = iaa.LinearContrast(alpha=2)
kpsoi = ia.KeypointsOnImage([ia.Keypoint(1, 1)], shape=(3, 3, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])
assert keypoints_equal([kpsoi], kpsoi_aug)
def test_heatmaps_not_changed(self):
aug = iaa.LinearContrast(alpha=2)
heatmaps_arr = np.zeros((3, 3, 1), dtype=np.float32) + 0.5
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]
        assert np.allclose(heatmaps.arr_0to1, heatmaps_aug.arr_0to1)
import os
import numpy as np
from array import array
def write_double_to_bin(filename, d_array):
output_file = open(filename, 'wb')
float_array = array('d', d_array)
float_array.tofile(output_file)
output_file.close()
def read_double_from_bin(filename):
input_file = open(filename, 'rb')
    float_array = array('d')
    float_array.frombytes(input_file.read())  # array.fromstring() was removed in Python 3.9
    input_file.close()
    return np.asarray(float_array)
def read_train_kernel(pan_array, kernel_dir):
valid_pan = []
valid_hyp = []
for pan in pan_array:
try:
flag = np.loadtxt(os.path.join(kernel_dir, 'train_flag_{}.txt'.format(pan)), dtype=int)
flag = np.atleast_1d(flag)[0]
if(flag):
hyp = read_double_from_bin(os.path.join(kernel_dir, 'train_hyp_{}.bin'.format(pan)))
valid_pan.append(pan)
valid_hyp.append(hyp)
        except Exception:
continue
valid_pan = np.asarray(valid_pan)
valid_hyp = np.asarray(valid_hyp)
return valid_pan, valid_hyp
def load_ts_data(filename):
array = np.loadtxt(filename, dtype=float)
array = array[1:]
t = array[0::2]
v = array[1::2]
return t, v
def read_one_test_data(test_dir, test_mode, pan):
test_prefix = 'test_{}'.format(test_mode)
pan_id = '{}'.format(pan)
flag_file = os.path.join(test_dir, '{}_flag_{}.txt'.format(test_prefix, pan_id))
flag_test = np.loadtxt(flag_file, dtype=int)
flag_test = np.atleast_1d(flag_test)[0]
if(flag_test):
feature_file = os.path.join(test_dir, '{}_feature_{}.txt'.format(test_prefix, pan_id))
        c_feature = np.loadtxt(feature_file, dtype=int)
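        # Assumed completion (truncated in the source); the time-series file
        # name below is hypothetical, modelled on the flag/feature naming:
        ts_file = os.path.join(test_dir, '{}_ts_{}.txt'.format(test_prefix, pan_id))
        t, v = load_ts_data(ts_file)
        return c_feature, t, v
    return None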
import os
import sys
import re
import gzip
import tarfile
import io
import scipy
import collections
import argparse
import pandas as pd
import numpy as np
from scipy.stats import binom
from pandas.api.types import is_string_dtype
from pathlib import Path
import numbers
import warnings
class FormatError(Exception):
pass
xls = re.compile("xls")
drop = "series_matrix\.txt\.gz$|filelist\.txt$|readme|\.bam(\.tdf|$)|\.bai(\.gz|$)|\.sam(\.gz|$)|\.csfasta|\.fa(sta)?(\.gz|\.bz2|\.txt\.gz|$)|\.f(a|n)a(\.gz|$)|\.wig|\.big[Ww]ig$|\.bw(\.|$)|\.bed([Gg]raph)?(\.tdf|\.gz|\.bz2|\.txt\.gz|$)|(broad_)?lincs|\.tdf$|\.hic$|\.rds(\.gz|$)|\.tar\.gz$|\.mtx(\.gz$|$)|dge\.txt\.gz$|umis?\.txt\.gz$"
drop = re.compile(drop)
pv_str = "p[^a-zA-Z]{0,4}val"
pv = re.compile(pv_str)
adj = re.compile("adj|fdr|corr|thresh")
ws = re.compile(" ")
mtabs = re.compile("\w+\t{2,}\w+")
tab = re.compile("\t")
fields = ["Type", "Class", "Conversion", "pi0", "FDR_pval", "hist", "note"]
PValSum = collections.namedtuple("PValSum", fields, defaults=[np.nan] * len(fields))
narrowpeak = [
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"signalValue",
"pValue",
"qValue",
"peak",
] # BED6+4
broadpeak = [
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"signalValue",
"pValue",
"qValue",
] # BED6+3
gappedpeak = [
"chrom",
"chromStart",
"chromEnd",
"name",
"score",
"strand",
"thickStart",
"thickEnd",
"itemRgb",
"blockCount",
"blockSizes",
"blockStarts",
"signalValue",
"pValue",
"qValue",
] # BED12+3
peak = re.compile("(narrow|broad|gapped)peak")
class ImportSuppfiles(object):
def __init__(self):
self.out = {}
def from_flat(self, input, tar=None):
if drop.search(input.name.lower() if tar else input.lower()):
key = os.path.basename(input.name if tar else input)
return self.out.update(note(key, "not imported"))
else:
out = {}
try:
if xls.search(input.name if tar else input):
try:
out.update(self.read_excel(input, tar=tar))
except ValueError as e:
out.update(self.read_csv(input, tar=tar))
else:
d = self.read_csv(input, tar=tar)
is_empty = [v.empty for v in d.values()][0]
if is_empty:
raise Exception("empty table")
else:
peakfile = peak.search(
input.name.lower() if tar else input.lower()
)
if peakfile:
key = os.path.basename(input.name if tar else input)
d[key].loc[-1] = d[key].columns
d[key] = d[key].sort_index().reset_index(drop=True)
d[key].columns = eval(peakfile.group(0))
out.update(d)
except Exception as e:
key = os.path.basename(input.name if tar else input)
peakfile = peak.search(input.name.lower() if tar else input.lower())
if peakfile:
e = f"Misspecified '{peakfile.group(0)}' file; {e}"
out.update(note(key, e))
return self.out.update(out)
def from_tar(self, input):
with tarfile.open(input, "r:*") as tar:
for member in tar:
if member.isfile():
self.from_flat(member, tar)
def find_header(self, df, n=20):
head = df.head(n)
matches = [
i[0]
for i in [
[i for i, x in enumerate(head[col].str.contains(pv_str, na=False)) if x]
for col in head
]
if i
]
idx = min(matches) + 1 if matches else 0
if idx == 0:
for index, row in head.iterrows():
if all([isinstance(i, str) for i in row if i is not np.nan]):
idx = index + 1
break
return idx
def csv_helper(self, input, input_name, csv, verbose=0):
# Get comments and set rows to skip
r = pd.read_csv(csv, sep=None, engine="python", iterator=True, nrows=1000)
comment = None
sep = r._engine.data.dialect.delimiter
columns = r._engine.columns
if isinstance(input, (tarfile.ExFileObject)):
with csv as h:
first_line = h.readline()
elif input_name.endswith("gz") or isinstance(input, (gzip.GzipFile)):
with gzip.open(input) as h:
first_line = h.readline().decode("utf-8").rstrip()
else:
with open(input, "r") as h:
first_line = h.readline().rstrip()
more_tabs_than_sep = len(tab.findall(first_line)) > len(
re.findall(sep, first_line)
)
if re.search("^#", first_line) or more_tabs_than_sep:
comment = "#"
# Get delimiter
r = pd.read_csv(
csv, sep=None, engine="python", iterator=True, skiprows=20, nrows=1000
)
sep = r._engine.data.dialect.delimiter
columns = r._engine.columns
if ws.search(sep):
sep = "\s+"
if mtabs.search(first_line):
sep = "\t+"
# Import file
if isinstance(input, (tarfile.ExFileObject)) and input_name.endswith("gz"):
with gzip.open(input) as h:
df = pd.read_csv(h, sep=sep, comment=comment, encoding="unicode_escape")
else:
df = pd.read_csv(input, sep=sep, comment=comment, encoding="unicode_escape")
# Check and fix column names
# Case of extra level of delimiters in column names
if len(df.columns) > len(columns):
df = pd.read_csv(
input,
header=None,
skiprows=[0],
sep=sep,
comment=comment,
encoding="unicode_escape",
).drop([0])
df.columns = columns
unnamed = ["Unnamed" in i for i in df.columns]
# Case of empty rows before header
if all(unnamed):
idx = self.find_header(df)
if idx > 0:
df = pd.read_csv(
input,
sep=sep,
comment=comment,
skiprows=idx,
encoding="unicode_escape",
)
# Case of anonymous row names
if unnamed[-1] & sum(unnamed) == 1:
if any([pv.search(i) for i in df.columns]):
df.columns = [df.columns[-1]] + list(df.columns[:-1])
if verbose > 1:
print("df after import:\n", df)
return {os.path.basename(input_name): df}
def excel_helper(self, input, input_name, verbose=0):
tabs = {}
if input_name.endswith("gz") or isinstance(input, (gzip.GzipFile)):
excel_file = gzip.open(input)
else:
excel_file = input
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
wb = pd.ExcelFile(excel_file)
if len(wb.sheet_names) == 0:
(m,) = [i.message for i in w][0].args
raise FormatError(
f"The data source could not be successfully parsed with warning: '{m}'",
)
sheets = wb.sheet_names
sheets = [i for i in sheets if "README" not in i]
for sheet in sheets:
df = wb.parse(sheet, comment="#")
if df.empty:
df = wb.parse(sheet)
if verbose > 1:
print("df after import:\n", df)
if not df.empty:
pu = sum(["Unnamed" in i for i in list(df.columns)]) / len(df.columns)
if pu >= 2 / 3:
idx = self.find_header(df)
if idx > 0:
df = wb.parse(sheet, skiprows=idx)
tabs.update({os.path.basename(input_name) + "-sheet-" + sheet: df})
return tabs
def read_csv(self, input, tar=None):
if isinstance(input, (tarfile.TarInfo)):
input_name = os.path.basename(input.name)
with tar.extractfile(input) as h:
if input_name.endswith("gz"):
with gzip.open(h) as gz:
csv = io.StringIO(gz.read().decode("unicode_escape"))
else:
csv = io.StringIO(h.read().decode("unicode_escape"))
with tar.extractfile(input) as h:
out = self.csv_helper(h, input_name, csv)
else:
input_name = input
csv = input
out = self.csv_helper(input, input_name, csv)
return out
def read_excel(self, input, tar=None):
if isinstance(input, (tarfile.TarInfo)):
input_name = os.path.basename(input.name)
with tar.extractfile(input) as h:
out = self.excel_helper(h, input_name)
else:
input_name = input
out = self.excel_helper(input, input_name)
return out
def raw_pvalues(i):
return bool(pv.search(i.lower()) and not adj.search(i.lower()))
def filter_pvalue_tables(input, pv=None, adj=None):
return {
k: v
for k, v in input.items()
if any([raw_pvalues(i) for i in v.columns if not isinstance(i, numbers.Number)])
}
def fix_column_dtype(df):
for col in df.columns:
s = df[col]
if is_string_dtype(s):
if "," in s[:5].astype(str).str.cat(sep=" "):
df[col] = s.apply(lambda x: str(x).replace(",", "."))
df[col] = pd.to_numeric(s, errors="coerce")
return df
def summarise_pvalue_tables(
df, var=["basemean", "value", "fpkm", "logcpm", "rpkm", "aveexpr"]
):
# Drop columns with numeric column names
    df = df.filter(regex=r"^\D")
# Drop columns with NaN column names
df = df.loc[:, df.columns.notnull()]
df.columns = map(str.lower, df.columns)
pval_cols = [i for i in df.columns if raw_pvalues(i)]
pvalues = df[pval_cols].copy()
# Check if there is ANOTHER(!!#?) level of ":" delimiters in p value column(s)
extra_delim = ":"
split_col = [i for i in pvalues.columns if extra_delim in i]
if split_col:
        for col in split_col:
            # locate the column by name; enumerate() over split_col would give
            # the wrong positional index into pvalues
            index = pvalues.columns.get_loc(col)
            col_count = len(re.findall(extra_delim, col))
            obs_count = len(re.findall(extra_delim, str(pvalues.iloc[0, index])))
if obs_count == 0:
pass
elif col_count == obs_count:
new_cols = col.split(extra_delim)
split_pval_col = [i for i in new_cols if raw_pvalues(i)]
cols_split = pvalues.iloc[:, index].str.split(extra_delim, expand=True)
try:
cols_split.columns = new_cols
pvalues[split_pval_col] = cols_split[split_pval_col]
pvalues.drop(col, axis=1, inplace=True)
except ValueError:
pass
pval_cols = [i for i in pvalues.columns if raw_pvalues(i)]
pvalues_check = fix_column_dtype(pvalues)
for v in var:
label = v
if v == "value":
            v = r"^value_\d"
label = "fpkm"
exprs = df.filter(regex=v, axis=1)
if not exprs.empty:
exprs_check = fix_column_dtype(exprs)
exprs_sum = exprs_check.mean(axis=1, skipna=True)
pvalues_check.loc[:, label] = exprs_sum
break
pv_stacked = (
pvalues_check.melt(id_vars=list(set(pvalues_check.columns) - set(pval_cols)))
.set_index("variable")
.rename(columns={"value": "pvalue"})
)
return pv_stacked.dropna()
# https://stackoverflow.com/a/32681075/1657871
def rle(inarray):
"""run length encoding. Partial credit to R rle function.
Multi datatype arrays catered for including non Numpy
returns: tuple (runlengths, startpositions, values)"""
    ia = np.asarray(inarray)
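    # The remainder of this function is truncated in this excerpt; the sketch
    # below follows the Stack Overflow answer linked above.
    n = len(ia)
    if n == 0:
        return (None, None, None)
    else:
        y = ia[1:] != ia[:-1]               # pairwise-unequal mask (string safe)
        i = np.append(np.where(y), n - 1)   # run end positions, incl. last element
        z = np.diff(np.append(-1, i))       # run lengths
        p = np.cumsum(np.append(0, z))[:-1] # run start positions
        return (z, p, ia[i])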
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import numpy as np
import matplotlib.pyplot as plt
import abel
import scipy.misc
from scipy.ndimage.interpolation import shift
from scipy.ndimage import zoom
import bz2
# This example demonstrates both Hansen and Law inverse Abel transform
# and basex for an image obtained using a velocity map imaging (VMI)
# photoelectron spectrometer to record the photoelectron angular distribution
# resulting from photodetachment of O2- at 454 nm.
# This spectrum was recorded in 2010
# ANU / The Australian National University
# J. Chem. Phys. 133, 174311 (2010) DOI: 10.1063/1.3493349
#
# Note: the image is zoomed to reduce calculation time
# Specify the path to the file
filename = os.path.join('data', 'O2-ANU1024.txt.bz2')
# Name the output files
base_dir, name = os.path.split(filename)
name = name.split('.')[0]
output_image = name + '_inverse_Abel_transform_HansenLaw.png'
output_text = name + '_speeds_HansenLaw.dat'
output_plot = 'plot_' + name + '_comparison_HansenLaw.png'
# Load an image file as a numpy array
print('Loading ' + filename)
imagefile = bz2.BZ2File(filename)
im = np.loadtxt(imagefile)
from plyfile import PlyData, PlyElement
import open3d as o3d
from pyobb.obb import OBB
import numpy as np
import os
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
import argparse
import utils
def obb_calc(filename, gravity=np.array((0.0, 1.0, 0.0))):
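    # Body truncated in this excerpt; a minimal sketch assuming pyobb's
    # OBB.build_from_points and open3d point-cloud I/O (not the original body):
    pcd = o3d.io.read_point_cloud(filename)
    points = np.asarray(pcd.points)
    obb = OBB.build_from_points(points)
    return obb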
import numpy as np
import scipy.interpolate as ipl
import scipy.signal as sp
from .common import *
class Processor:
def __init__(self, sr):
self.samprate = float(sr)
def simple(self, timeList, f0List, hPhaseList = None, vAdditional = (), vuvAdditional = ()):
nHop = len(f0List)
nNewHop = len(timeList)
newF0List = np.zeros(nNewHop)
if(hPhaseList is not None):
newHPhaseList = np.zeros((nNewHop, hPhaseList.shape[1]))
newVAdditional, newVUVAdditional = [], []
vAdditional, vuvAdditional = list(vAdditional), list(vuvAdditional)
for iItem, item in enumerate(vAdditional):
if(isinstance(item, tuple)):
assert(len(item) == 2)
item, default = item
vAdditional[iItem] = item
else:
default = 0.0
newItem = np.full((nNewHop, *item.shape[1:]), default)
newVAdditional.append(newItem)
for iItem, item in enumerate(vuvAdditional):
if(isinstance(item, tuple)):
assert(len(item) == 2)
item, default = item
vuvAdditional[iItem] = item
else:
default = 0.0
newItem = np.full((nNewHop, *item.shape[1:]), default)
newVUVAdditional.append(newItem)
# v
segments = splitArray(f0List)
segBeginHop = 0
newHopIndexList = np.arange(nNewHop)
for iSegment, segment in enumerate(segments):
if(segment[0] > 0.0):
segEndHop = segBeginHop + len(segment)
                newHopIndexSlice = newHopIndexList[np.logical_and(timeList >= segBeginHop, timeList <= segEndHop)]
from typing import Tuple
import numpy as np
from PIL import Image
def one_color(mode: str, size: Tuple[int, int], color: Tuple[int, int, int]) -> Image:
return Image.new(mode, size, color)
def two_tone_horizontal(mode: str, w: int, h: int, split: int, color1: Tuple[int, int, int],
color2: Tuple[int, int, int]) -> Image:
arr = np.full((h, w, 3), color1, dtype=np.uint8)
arr[:, split:] = color2
return Image.fromarray(arr, mode)
def two_tone_vertical(mode: str, w: int, h: int, split: int, color1: Tuple[int, int, int],
color2: Tuple[int, int, int]) -> Image:
arr = np.full((h, w, 3), color1, dtype=np.uint8)
arr[split:, :] = color2
return Image.fromarray(arr, mode)
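# Quick usage sketch (illustrative): a 100x50 image, left half red, right half blue
#     img = two_tone_horizontal('RGB', 100, 50, 50, (255, 0, 0), (0, 0, 255))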
def gradient_horizontal(mode: str, w: int, h: int, color1: Tuple[int, int, int],
color2: Tuple[int, int, int]) -> Image:
gradient = np.array([
        np.linspace(color_component1, color_component2, w)
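        for color_component1, color_component2 in zip(color1, color2)
    ])
    # Assumed continuation (original truncated above): tile the (3, w) gradient
    # down h rows into an (h, w, 3) image.
    arr = np.tile(gradient.T[np.newaxis, :, :], (h, 1, 1)).astype(np.uint8)
    return Image.fromarray(arr, mode)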
"""
Script for plotting differences in models for select variables over the
1950-2019 period
Author : <NAME>
Date : 15 December 2021
Version : 7 - adds validation data for early stopping
"""
### Import packages
import sys
import math
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import palettable.scientific.sequential as sss
import cmocean as cmocean
import calc_Utilities as UT
import calc_dataFunctions as df
import calc_Stats as dSS
import scipy.stats as sts
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
modelGCMs = ['CanESM2','MPI','CSIRO-MK3.6','KNMI-ecearth','GFDL-CM3','GFDL-ESM2M','LENS']
modelGCMsNames = ['CanESM2','MPI','CSIRO-MK3.6','KNMI-ecearth','GFDL-CM3','GFDL-ESM2M','LENS','MMmean']
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m"]
datasetsingle = ['SMILE']
monthlychoiceq = ['JFM','AMJ','JAS','OND','annual']
variables = ['T2M','P','SLP']
reg_name = 'SMILEGlobe'
level = 'surface'
monthlychoiceq = ['annual']
variables = ['T2M']
timeper = 'historical'
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
baseline = np.arange(1951,1980+1,1)
###############################################################################
###############################################################################
window = 0
yearsall = np.arange(1950+window,2019+1,1)
###############################################################################
###############################################################################
numOfEns = 16
lentime = len(yearsall)
###############################################################################
###############################################################################
dataset = datasetsingle[0]
lat_bounds,lon_bounds = UT.regions(reg_name)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
lensalso = True
randomalso = False
shuffletype = 'none'
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Read in model data
def read_primary_dataset(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
### Call functions
for vv in range(len(variables)):
for mo in range(len(monthlychoiceq)):
variq = variables[vv]
monthlychoice = monthlychoiceq[mo]
directoryfigure = '/Users/zlabe/Desktop/ModelComparison_v1/Climatologies/interModel/%s/' % variq
saveData = monthlychoice + '_' + variq + '_' + reg_name
print('*Filename == < %s >' % saveData)
### Read data
models,lats,lons = read_primary_dataset(variq,dataset,monthlychoice,numOfEns,
lensalso,randomalso,ravelyearsbinary,
ravelbinary,shuffletype,timeper,
lat_bounds,lon_bounds)
### Calculate ensemble mean
ensmean = np.nanmean(models[:,:,:,:,:],axis=1)
### Calculate multimodel mean
modmean = np.nanmean(models[:,:,:,:,:],axis=0)
### Calculate difference from multimodelmean
diffmod = models - modmean
diffmodensm = np.nanmean(diffmod[:,:,:,:,:],axis=1)
diffmodmean = np.nanmean(diffmodensm[:,:,:,:],axis=1)
### Calculate different between each model
# intermodel = np.empty((models.shape[0],models.shape[0],models.shape[1],
# models.shape[2],models.shape[3],models.shape[4]))
# for mm in range(models.shape[0]):
# for ea in range(models.shape[0]):
# intermodel[mm,ea,:,:,:,:] = models[mm,:,:,:,:] - models[ea,:,:,:,:]
# ensmeanintermodel = np.nanmean(intermodel[:,:,:,:,:,:],axis=2)
# timeensmeanintermodel = np.nanmean(ensmeanintermodel[:,:,:,:,:],axis=2)
###############################################################################
###############################################################################
###############################################################################
#######################################################################
#######################################################################
#######################################################################
### Plot subplot of different from multimodel mean
if variq == 'T2M':
limit = np.arange(-6,6.01,0.25)
barlim = np.round(np.arange(-6,7,2),2)
cmap = cmocean.cm.balance
label = r'\textbf{%s -- [$^{\circ}$C MMmean difference] -- 1950-2019}' % variq
elif variq == 'P':
limit = np.arange(-3,3.01,0.01)
barlim = np.round(np.arange(-3,3.1,1),2)
cmap = cmocean.cm.tarn
label = r'\textbf{%s -- [mm/day MMmean difference] -- 1950-2019}' % variq
elif variq == 'SLP':
limit = np.arange(-5,5.1,0.25)
barlim = np.round(np.arange(-5,6,1),2)
cmap = cmocean.cm.diff
label = r'\textbf{%s -- [hPa MMmean difference] -- 1950-2019}' % variq
fig = plt.figure(figsize=(8,4))
for r in range(len(diffmodmean)):
var = diffmodmean[r]
ax1 = plt.subplot(2,4,r+2)
m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
m.drawcoastlines(color='dimgrey',linewidth=0.27)
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
circle = m.drawmapboundary(fill_color='white',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,var,limit,extend='both')
cs1.set_cmap(cmap)
ax1.annotate(r'\textbf{%s}' % modelGCMs[r],xy=(0,0),xytext=(0.5,1.10),
textcoords='axes fraction',color='dimgrey',fontsize=8,
rotation=0,ha='center',va='center')
ax1.annotate(r'\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),
textcoords='axes fraction',color='k',fontsize=6,
rotation=330,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.36,0.11,0.3,0.03])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=5)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.00,bottom=0.14)
plt.savefig(directoryfigure + 'MultiModelBias-%s_ALL.png' % saveData,dpi=300)
directorydataMS = '/Users/zlabe/Documents/Research/ModelComparison/Data/RevisitResults_v7/'
np.save(directorydataMS + 'MMMeandifferences_7models.npy',diffmodmean)
###############################################################################
###############################################################################
###############################################################################
fig = plt.figure(figsize=(10,2))
for r in range(len(diffmodmean)+1):
if r < 7:
var = diffmodmean[r]
else:
var = np.empty((lats.shape[0],lons.shape[0]))
var[:] = np.nan
ax1 = plt.subplot(1,len(diffmodmean)+1,r+1)
m = Basemap(projection='npstere',boundinglat=65,lon_0=0,
resolution='l',round =True,area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,var,limit,extend='both')
cs1.set_cmap(cmap)
if ocean_only == True:
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
elif land_only == True:
m.drawlsmask(land_color=(0,0,0,0),ocean_color='darkgrey',lakes=True,zorder=5)
ax1.annotate(r'\textbf{%s}' % modelGCMsNames[r],xy=(0,0),xytext=(0.5,1.10),
textcoords='axes fraction',color='dimgrey',fontsize=8,
rotation=0,ha='center',va='center')
ax1.annotate(r'\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),
textcoords='axes fraction',color='k',fontsize=6,
rotation=330,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.36,0.13,0.3,0.03])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=5)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.02,bottom=0.14)
plt.savefig(directoryfigure + 'MultiModelBias-%s_ALL-Arctic.png' % saveData,dpi=300)
###############################################################################
###############################################################################
###############################################################################
if variq == 'T2M':
limit = np.arange(-3,3.01,0.2)
barlim = np.round(np.arange(-3,4,1),2)
cmap = cmocean.cm.balance
label = r'\textbf{%s -- [$^{\circ}$C MMmean difference] -- 1950-2019}' % variq
elif variq == 'P':
limit = np.arange(-3,3.01,0.01)
barlim = np.round(np.arange(-3,3.1,1),2)
cmap = cmocean.cm.tarn
label = r'\textbf{%s -- [mm/day MMmean difference] -- 1950-2019}' % variq
elif variq == 'SLP':
limit = np.arange(-5,5.1,0.25)
barlim = np.round(np.arange(-5,6,1),2)
cmap = cmocean.cm.diff
label = r'\textbf{%s -- [hPa MMmean difference] -- 1950-2019}' % variq
fig = plt.figure(figsize=(10,2))
for r in range(len(diffmodmean)+1):
if r < 7:
var = diffmodmean[r]
else:
var = np.empty((lats.shape[0],lons.shape[0]))
var[:] = np.nan
ax1 = plt.subplot(1,len(diffmodmean)+1,r+1)
m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,var,limit,extend='both')
cs1.set_cmap(cmap)
if ocean_only == True:
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
elif land_only == True:
m.drawlsmask(land_color=(0,0,0,0),ocean_color='darkgrey',lakes=True,zorder=5)
ax1.annotate(r'\textbf{%s}' % modelGCMsNames[r],xy=(0,0),xytext=(0.5,1.10),
textcoords='axes fraction',color='dimgrey',fontsize=8,
rotation=0,ha='center',va='center')
ax1.annotate(r'\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),
textcoords='axes fraction',color='k',fontsize=6,
rotation=330,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.36,0.13,0.3,0.03])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=5)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.02,bottom=0.14)
plt.savefig(directoryfigure + 'MultiModelBias-%s_ALL-StyleGlobe.png' % saveData,dpi=300)
###############################################################################
###############################################################################
###############################################################################
if variq == 'T2M':
limit = np.arange(-3,3.01,0.2)
barlim = np.round(np.arange(-3,4,1),2)
cmap = cmocean.cm.balance
label = r'\textbf{%s -- [$^{\circ}$C MMmean difference] -- 1950-2019}' % variq
elif variq == 'P':
limit = np.arange(-3,3.01,0.01)
barlim = np.round(np.arange(-3,3.1,1),2)
cmap = cmocean.cm.tarn
label = r'\textbf{%s -- [mm/day MMmean difference] -- 1950-2019}' % variq
elif variq == 'SLP':
limit = np.arange(-5,5.1,0.25)
barlim = np.round(np.arange(-5,6,1),2)
cmap = cmocean.cm.diff
label = r'\textbf{%s -- [hPa MMmean difference] -- 1950-2019}' % variq
fig = plt.figure(figsize=(10,2))
for r in range(len(diffmodmean)+1):
if r < 7:
var = diffmodmean[r]
else:
var = np.empty((lats.shape[0],lons.shape[0]))
var[:] = np.nan
latq = np.where((lats >= -20) & (lats <= 20))[0]
latsqq = lats[latq]
var = var[latq,:]
ax1 = plt.subplot(1,len(diffmodmean)+1,r+1)
m = Basemap(projection='moll',lon_0=0,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
var, lons_cyclic = addcyclic(var, lons)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, latsqq)
x, y = m(lon2d, lat2d)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,var,limit,extend='both')
cs1.set_cmap(cmap)
if ocean_only == True:
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
elif land_only == True:
m.drawlsmask(land_color=(0,0,0,0),ocean_color='darkgrey',lakes=True,zorder=5)
ax1.annotate(r'\textbf{%s}' % modelGCMsNames[r],xy=(0,0),xytext=(0.5,1.10),
textcoords='axes fraction',color='dimgrey',fontsize=8,
rotation=0,ha='center',va='center')
ax1.annotate(r'\textbf{[%s]}' % letters[r],xy=(0,0),xytext=(0.86,0.97),
textcoords='axes fraction',color='k',fontsize=6,
rotation=330,ha='center',va='center')
###############################################################################
cbar_ax1 = fig.add_axes([0.36,0.13,0.3,0.03])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=9,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=5)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(top=0.85,wspace=0.02,hspace=0.02,bottom=0.14)
plt.savefig(directoryfigure + 'MultiModelBias-%s_ALL-Tropics.png' % saveData,dpi=300)
###############################################################################
###############################################################################
###############################################################################
#######################################################################
#######################################################################
#######################################################################
### Plot subplot of different from multimodel mean
if variq == 'T2M':
limit = np.arange(-6,6.01,0.25)
barlim = np.round(np.arange(-6,7,2),2)
cmap = cmocean.cm.balance
label = r'\textbf{%s -- [$^{\circ}$C difference] -- 1950-2019}' % variq
elif variq == 'P':
limit = np.arange(-3,3.01,0.01)
        barlim = np.round(np.arange(-3,3.1,1),2)
# The code is based on original repository https://github.com/OctoberChang/klcpd_code
# !/usr/bin/env python
# encoding: utf-8
import math
import numpy as np
import random
import sklearn.metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from sklearn.metrics.pairwise import euclidean_distances
from .data import HankelDataset
from types import SimpleNamespace
from tqdm import trange
from torch.utils.data import DataLoader
def median_heuristic(X, beta=0.5):
max_n = min(30000, X.shape[0])
D2 = euclidean_distances(X[:max_n], squared=True)
med_sqdist = np.median(D2[np.triu_indices_from(D2, k=1)])
beta_list = [beta ** 2, beta ** 1, 1, (1.0 / beta) ** 1, (1.0 / beta) ** 2]
return [med_sqdist * b for b in beta_list]
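# Usage sketch (illustrative): candidate kernel bandwidths for 100 3-d samples,
# e.g. sigmas = median_heuristic(np.random.randn(100, 3))  # five scales around the median sq. distance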
class NetG(nn.Module):
def __init__(self, var_dim, RNN_hid_dim, num_layers: int = 1):
super().__init__()
self.var_dim = var_dim
self.RNN_hid_dim = RNN_hid_dim
self.rnn_enc_layer = nn.GRU(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)
self.rnn_dec_layer = nn.GRU(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)
self.fc_layer = nn.Linear(self.RNN_hid_dim, self.var_dim)
# X_p: batch_size x wnd_dim x var_dim (Encoder input)
# X_f: batch_size x wnd_dim x var_dim (Decoder input)
# h_t: 1 x batch_size x RNN_hid_dim
# noise: 1 x batch_size x RNN_hid_dim
def forward(self, X_p, X_f, noise):
X_p_enc, h_t = self.rnn_enc_layer(X_p)
X_f_shft = self.shft_right_one(X_f)
hidden = h_t + noise
Y_f, _ = self.rnn_dec_layer(X_f_shft, hidden)
output = self.fc_layer(Y_f)
return output
def shft_right_one(self, X):
X_shft = X.clone()
X_shft[:, 0, :].data.fill_(0)
X_shft[:, 1:, :] = X[:, :-1, :]
return X_shft
class NetD(nn.Module):
def __init__(self, var_dim, RNN_hid_dim, num_layers: int = 1):
super(NetD, self).__init__()
self.var_dim = var_dim
self.RNN_hid_dim = RNN_hid_dim
self.rnn_enc_layer = nn.GRU(self.var_dim, self.RNN_hid_dim, num_layers=num_layers, batch_first=True)
self.rnn_dec_layer = nn.GRU(self.RNN_hid_dim, self.var_dim, num_layers=num_layers, batch_first=True)
def forward(self, X):
X_enc, _ = self.rnn_enc_layer(X)
X_dec, _ = self.rnn_dec_layer(X_enc)
return X_enc, X_dec
class KL_CPD(nn.Module):
def __init__(self, D: int, critic_iters: int = 5,
lambda_ae: float = 0.001, lambda_real: float = 0.1,
p_wnd_dim: int = 25, f_wnd_dim: int = 10, sub_dim: int = 1, RNN_hid_dim: int = 10):
super().__init__()
self.p_wnd_dim = p_wnd_dim
self.f_wnd_dim = f_wnd_dim
self.sub_dim = sub_dim
self.D = D
self.var_dim = D * sub_dim
self.critic_iters = critic_iters
self.lambda_ae, self.lambda_real = lambda_ae, lambda_real
self.RNN_hid_dim = RNN_hid_dim
self.netD = NetD(self.var_dim, RNN_hid_dim)
self.netG = NetG(self.var_dim, RNN_hid_dim)
@property
def device(self):
return next(self.parameters()).device
def __mmd2_loss(self, X_p_enc, X_f_enc):
sigma_var = self.sigma_var
# some constants
n_basis = 1024
gumbel_lmd = 1e+6
cnst = math.sqrt(1. / n_basis)
n_mixtures = sigma_var.size(0)
n_samples = n_basis * n_mixtures
batch_size, seq_len, nz = X_p_enc.size()
# gumbel trick to get masking matrix to uniformly sample sigma
# input: (batch_size*n_samples, nz)
# output: (batch_size, n_samples, nz)
def sample_gmm(W, batch_size):
U = torch.FloatTensor(batch_size * n_samples, n_mixtures).uniform_().to(self.device)
            sigma_samples = F.softmax(U * gumbel_lmd, dim=1).matmul(sigma_var)
W_gmm = W.mul(1. / sigma_samples.unsqueeze(1))
W_gmm = W_gmm.view(batch_size, n_samples, nz)
return W_gmm
W = Variable(torch.FloatTensor(batch_size * n_samples, nz).normal_(0, 1).to(self.device))
W_gmm = sample_gmm(W, batch_size) # batch_size x n_samples x nz
W_gmm = torch.transpose(W_gmm, 1, 2).contiguous() # batch_size x nz x n_samples
XW_p = torch.bmm(X_p_enc, W_gmm) # batch_size x seq_len x n_samples
XW_f = torch.bmm(X_f_enc, W_gmm) # batch_size x seq_len x n_samples
z_XW_p = cnst * torch.cat((torch.cos(XW_p), torch.sin(XW_p)), 2)
z_XW_f = cnst * torch.cat((torch.cos(XW_f), torch.sin(XW_f)), 2)
batch_mmd2_rff = torch.sum((z_XW_p.mean(1) - z_XW_f.mean(1)) ** 2, 1)
return batch_mmd2_rff
def forward(self, X_p: torch.Tensor, X_f: torch.Tensor):
batch_size = X_p.size(0)
X_p_enc, _ = self.netD(X_p)
X_f_enc, _ = self.netD(X_f)
Y_pred_batch = self.__mmd2_loss(X_p_enc, X_f_enc)
return Y_pred_batch
def predict(self, ts):
dataset = HankelDataset(ts, self.p_wnd_dim, self.f_wnd_dim, self.sub_dim)
dataloader = DataLoader(dataset, batch_size=128, shuffle=False)
preds = []
with torch.no_grad():
for batch in dataloader:
X_p, X_f = [batch[key].float().to(self.device) for key in ['X_p', 'X_f']]
preds.append(self.forward(X_p, X_f).detach().cpu().numpy())
return np.concatenate(preds)
def fit(self, ts, epoches: int = 100, lr: float = 3e-4, weight_clip: float = .1, weight_decay: float = 0.,
momentum: float = 0.):
# must be defined in fit() method
optG = torch.optim.AdamW(self.netG.parameters(), lr=lr, weight_decay=weight_decay)
optD = torch.optim.AdamW(self.netD.parameters(), lr=lr, weight_decay=weight_decay)
dataset = HankelDataset(ts, self.p_wnd_dim, self.f_wnd_dim, self.sub_dim)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)
sigma_list = median_heuristic(dataset.Y_hankel, beta=.5)
self.sigma_var = torch.FloatTensor(sigma_list).to(self.device)
tbar = trange(epoches, disable=True)
for epoch in tbar:
for batch in dataloader:
# Fit critic
for p in self.netD.parameters():
p.requires_grad = True
for p in self.netD.rnn_enc_layer.parameters():
p.data.clamp_(-weight_clip, weight_clip)
self._optimizeD(batch, optD)
                if np.random.choice(np.arange(self.critic_iters)) == 0:
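                    # Assumed continuation (excerpt truncated here): take one
                    # generator step roughly every critic_iters critic updates,
                    # e.g. via a self._optimizeG(batch, optG) helper.
                    pass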
"""
https://github.com/numpy/numpy/blob/v1.19.0/numpy/lib/histograms.py
Histogram-related functions
"""
import functools
import operator
import warnings
import numpy as np
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _ptp(x):
"""Peak-to-peak value of x.
This implementation avoids the problem of signed integer arrays having a
peak-to-peak value that cannot be represented with the array's data type.
This function returns an unsigned value for signed integer arrays.
"""
return _unsigned_subtract(x.max(), x.min())
def _hist_bin_sqrt(*input_data):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
x = input_data[0]
return _ptp(x) / np.sqrt(x.size)
def _hist_bin_sturges(*input_data):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
x = input_data[0]
return _ptp(x) / (np.log2(x.size) + 1.0)
def _hist_bin_rice(*input_data):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
x = input_data[0]
return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(*input_data):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
x = input_data[0]
return (24.0 * np.pi ** 0.5 / x.size) ** (1.0 / 3.0) * np.std(x)
def _hist_bin_stone(*input_data):
"""
Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
This paper by Stone appears to be the origination of this rule.
http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
range : (float, float)
The lower and upper range of the bins.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
x, range = input_data
n = x.size
ptp_x = _ptp(x)
if n <= 1 or ptp_x == 0:
return 0
def jhat(nbins):
hh = ptp_x / nbins
p_k = np.histogram(x, bins=nbins, range=range)[0] / n
return (2 - (n + 1) * p_k.dot(p_k)) / hh
nbins_upper_bound = max(100, int(np.sqrt(n)))
nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
if nbins == nbins_upper_bound:
warnings.warn("The number of bins estimated may be suboptimal.",
RuntimeWarning, stacklevel=3)
return ptp_x / nbins
def _hist_bin_doane(*input_data):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
x = input_data[0]
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return _ptp(x) / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(*input_data):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 0 for the bin width.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
x = input_data[0]
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(*input_data):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
If the bin width from the FD estimator is 0, the Sturges estimator is used.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
and is the default in the R language. This method gives good off-the-shelf
behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
If the IQR is 0, it's unlikely any variance-based estimators will be of
use, so we revert to the Sturges estimator, which only uses the size of the
dataset in its calculation.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
x, range = input_data
fd_bw = _hist_bin_fd(x, range)
sturges_bw = _hist_bin_sturges(x, range)
#del range # unused
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
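# Usage sketch (illustrative, not part of the NumPy source): pick a bin width
# with the FD rule and turn it into a bin count.
#     x = np.random.randn(1000)
#     width = _hist_bin_selectors['fd'](x, (x.min(), x.max()))
#     nbins = int(np.ceil(_ptp(x) / width)) if width else 1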
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
# Ensure that the array is a "subtractable" dtype
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
RuntimeWarning, stacklevel=3)
a = a.astype(np.uint8)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
def _unsigned_subtract(a, b):
"""
Subtract two values where a >= b, and produce an unsigned result
This is needed when finding the difference between the upper and lower
bound of an int16 histogram
"""
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError:
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
        return np.subtract(a, b, casting='unsafe', dtype=dt)
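# Example: with int8 endpoints a signed subtract would overflow;
# _unsigned_subtract(np.int8(127), np.int8(-128)) yields 255 as a uint8.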
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import os,os.path
import re
from numpy.lib.recfunctions import append_fields
from . import localpath
class SN1a_feedback(object):
def __init__(self):
"""
this is the object that holds the feedback table for SN1a
.masses gives a list of masses
.metallicities gives a list of possible yield metallicities
.elements gives the elements considered in the yield table
.table gives a dictionary where the yield table for a specific metallicity can be queried
.table[0.02] gives a yield table.
Keys of this object are ['Mass','mass_in_remnants','elements']
Mass is in units of Msun
'mass_in_remnants' in units of Msun but with a '-'
'elements' yield in Msun normalised to Mass. i.e. integral over all elements is unity
"""
def Seitenzahl(self):
"""
Seitenzahl 2013 from Ivo txt
"""
y = np.genfromtxt(localpath + 'input/yields/Seitenzahl2013/0.02.txt', names = True, dtype = None)
self.metallicities = list([0.02])
self.masses = list([1.4004633930489443])
names = list(y.dtype.names)
self.elements = names[2:]
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
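        # Usage sketch (column name assumed): query the normalised iron yield via
        #     sn1a = SN1a_feedback(); sn1a.Seitenzahl(); fe = sn1a.table[0.02]['Fe']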
def Thielemann(self):
"""
        Thielemann 2003 yields as compiled in Travaglio 2004
"""
y = np.genfromtxt(localpath + 'input/yields/Thielemann2003/0.02.txt', names = True, dtype = None)
metallicity_list = [0.02]
self.metallicities = metallicity_list
self.masses = [1.37409]
names = y.dtype.names
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
self.elements = list(y.dtype.names[2:])
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Iwamoto(self):
'''
Iwamoto99 yields building up on Nomoto84
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('species1','|S4'),('W7',float),('W70',float),('WDD1',float),('WDD2',float),('WDD3',float),('CDD1',float),('CDD2',float)]
metallicity_list = [0.02,0.0]
self.metallicities = metallicity_list
self.masses = [1.38]
y = np.genfromtxt(localpath + 'input/yields/Iwamoto/sn1a_yields.txt',dtype = tdtype, names = None)
## Python3 need transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['species1']):
element_list2.append(jtem.decode('utf8'))
y = rcfuncs.append_fields(y,'species',element_list2,usemask = False)
################################
without_radioactive_isotopes=True
if without_radioactive_isotopes:### without radioactive isotopes it should be used this way because the radioactive nuclides are already calculated in here
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne']#,'22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg']#,'26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar']#, '36Cl']
potassium_list = ['39K','41K']#, '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca']#, '40K']
scandium_list = ['45Sc']#,'44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti']#,'48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr']#,'53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe']#,'56Co','57Co']
cobalt_list = ['59Co']#,'60Fe','56Ni','57Ni','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni']#,'60Co']
copper_list = ['63Cu','65Cu']#,'63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
##### with radioactive isotopes (unclear weather they are double, probably not but remnant mass is too big)
else:
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne','22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg','26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar', '36Cl']
potassium_list = ['39K','41K', '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca', '40K']
scandium_list = ['45Sc','44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti','48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr','53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe','56Co','57Co','56Ni','57Ni']
cobalt_list = ['59Co','60Fe','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni','60Co']
copper_list = ['63Cu','65Cu','63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
indexing = {}
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
self.elements = list(indexing.keys())
#################################
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(metallicity_list[:]):
if metallicity == 0.02:
model = 'W7'
elif metallicity == 0.0:
model = 'W70'
else:
                print('this metallicity is not represented in the Iwamoto yields. They only have solar (0.02) and zero (0.0)')
additional_keys = ['Mass', 'mass_in_remnants']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses[0]
total_mass = []
for i,item in enumerate(self.elements):
for j,jtem in enumerate(indexing[item]):
cut = np.where(y['species']==jtem)
yield_tables_final_structure_subtable[item] += y[model][cut]
total_mass.append(y[model][cut])
yield_tables_final_structure_subtable['mass_in_remnants'] = -sum(total_mass)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = np.divide(yield_tables_final_structure_subtable[item],-yield_tables_final_structure_subtable['mass_in_remnants'])
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
class SN2_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for CC-SN.
Different tables can be loaded by the methods.
"""
def Portinari(self):
'''
Loading the yield table from Portinari1998.
'''
self.metallicities = [0.0004,0.004,0.008,0.02,0.05]
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/0.02.txt',names=True)
self.masses = list(x['Mass'])
self.elements = list(x.dtype.names[3:])
yield_tables_final_structure = {}
for metallicity in self.metallicities:
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = np.array(self.masses)
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/%s.txt' %(metallicity),names=True)
for item in self.elements:
yield_tables_final_structure_subtable[item] = np.divide(x[item],x['Mass'])
yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(x['Mass'] - x['ejected_mass'], x['Mass'])
for i,item in enumerate(self.masses):
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][i] = 1. - (yield_tables_final_structure_subtable['mass_in_remnants'][i] + sum(list(yield_tables_final_structure_subtable[self.elements][i])))
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def francois(self):
'''
        Loading the yield table of Francois et al. 2004. Taken from tables 1 and 2 of the paper, with O, H and He added from WW95 tables 5A and 5B,
        where all elements are for Z=Zsun and values for Msun > 40 have been kept the same as for Msun=40.
        Values from 11-25 Msun use case A from WW95 and 30-40 Msun use case B.
'''
y = np.genfromtxt(localpath + 'input/yields/Francois04/francois_yields.txt',names=True)
self.elements = list(y.dtype.names[1:])
self.masses = y[y.dtype.names[0]]
self.metallicities = [0.02]
######### going from absolute ejected masses to relative ejected masses normed with the weight of the initial star
for i,item in enumerate(y.dtype.names[1:]):
y[item] = np.divide(y[item],y['Mass'])
yield_tables = {}
for i,item in enumerate(self.metallicities):
yield_tables[item] = y
self.table = yield_tables
def chieffi04(self):
'''
Loading the yield table of chieffi04.
'''
DATADIR = localpath + 'input/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'
            from urllib.request import urlretrieve
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
            urlretrieve(url, MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extractall(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('metallicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = np.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
metallicity_list = np.unique(y['metallicity'])
self.metallicities = np.sort(metallicity_list)
number_of_species = int(len(y)/len(self.metallicities))
tables = []
for i, item in enumerate(self.metallicities):
tables.append(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][np.where(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.append(item.decode('utf8'))
element_list = np.array(element_list2)
indexing = [re.split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.append(indexing[i][1])
self.elements = list(np.unique(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.append(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = tables[metallicity_index]
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = np.array(self.masses)
for j,jtem in enumerate(self.masses):
                yield_tables_final_structure_subtable['mass_in_remnants'][j] = yields_for_one_metallicity[str(jtem)][1] / float(jtem)
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
for t,ttem in enumerate(element_position):
if ttem == item:
yield_tables_final_structure_subtable[item][j] += yields_for_one_metallicity[str(jtem)][t+3] / float(jtem)
# remnant + yields of all elements is less than the total mass. In the next loop the wind mass is calculated.
name_list = list(yield_tables_final_structure_subtable.dtype.names[3:]) + ['mass_in_remnants']
for i in range(len(yield_tables_final_structure_subtable)):
tmp = []
for j,jtem in enumerate(name_list):
tmp.append(yield_tables_final_structure_subtable[jtem][i])
tmp = sum(tmp)
yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][i] = 1 - tmp
yield_tables_final_structure[self.metallicities[metallicity_index]] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def chieffi04_net(self):
'''
Loading the yield table of chieffi04 corrected for Anders & Grevesse 1989 solar scaled initial yields
'''
DATADIR = localpath + 'input/yields/Chieffi04'
if not os.path.exists(DATADIR):
os.mkdir(DATADIR)
MASTERFILE = '{}/chieffi04_yields'.format(DATADIR)
def _download_chieffi04():
"""
Downloads chieffi 04 yields from Vizier.
"""
url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'
            from urllib.request import urlretrieve
print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')
if os.path.exists(MASTERFILE):
os.remove(MASTERFILE)
            urlretrieve(url, MASTERFILE)
import tarfile
tar = tarfile.open(MASTERFILE)
tar.extractall(path=DATADIR)
tar.close()
if not os.path.exists(MASTERFILE):
_download_chieffi04()
tdtype = [('metallicity',float),('date_after_explosion',float),('species','|S5'),('13',float),('15',float),('20',float),('25',float),('30',float),('35',float)]
y = np.genfromtxt('%s/yields.dat' %(DATADIR), dtype = tdtype, names = None)
metallicity_list = np.unique(y['metallicity'])
self.metallicities = np.sort(metallicity_list)
number_of_species = int(len(y)/len(self.metallicities))
tables = []
for i, item in enumerate(self.metallicities):
tables.append(y[(i*number_of_species):((i+1)*number_of_species)])
#############################################
for i in range(len(tables)):
tables[i] = tables[i][np.where(tables[i]['date_after_explosion']==0)]
element_list = tables[0]['species'][3:]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for i, item in enumerate(element_list):
element_list2.append(item.decode('utf8'))
element_list = np.array(element_list2)
indexing = [re.split(r'(\d+)', s)[1:] for s in element_list]
element_position = []
for i,item in enumerate(element_list):
element_position.append(indexing[i][1])
self.elements = list(np.unique(element_position))
masses = tables[0].dtype.names[3:]
masses_list = []
for i,item in enumerate(masses):
masses_list.append(int(item))
self.masses = masses_list
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yield_tables_final_structure[self.metallicities[metallicity_index]] = np.load(DATADIR + '/chieffi_net_met_ind_%d.npy' %(metallicity_index))
self.table = yield_tables_final_structure
#############################################
def Nugrid(self):
'''
        Loading the NuGrid SN2 stellar yields from the NuGrid stellar data set I: stellar yields from H to Bi for stars with metallicities Z = 0.02 and Z = 0.01.
        The wind yields need to be added to the *exp* explosion yields.
        No r-process contribution, but s- and p-process from AGB and massive stars.
        Delayed and rapid SN explosion postprocessing is included. Rapid is not consistent with very massive stars, so we use the 'delayed' yield set.
        Mass in remnants is not totally consistent with the paper table: [ 6.47634087, 2.67590435, 1.98070676] vs. [6.05,2.73,1.61], see table 4.
        Same with z=0.02, but other elements are implemented in the right way: [ 3.27070753, 8.99349996, 6.12286813, 3.1179861 , 1.96401573] vs. [3,8.75,5.71,2.7,1.6].
        We have a switch to change between the two different methods (rapid/delayed explosion).
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float)]
tdtype2 = [('empty',int),('element1','|S3'),('165',float),('200',float),('300',float),('500',float),('1500',float),('2000',float),('2500',float),('3200',float),('6000',float)]
expdtype = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('25_rapid',float)]
expdtype2 = [('empty',int),('element1','|S3'),('15_delay',float),('15_rapid',float),('20_delay',float),('20_rapid',float),('25_delay',float),('32_delay',float),('32_rapid',float),('60_delay',float)]
yield_tables = {}
self.metallicities = [0.02,0.01]
which_sn_model_to_use = 'delay' # 'rapid'
for i,metallicity_index in enumerate([2,1]):
if i == 0:
z = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(metallicity_index,metallicity_index),dtype = tdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(metallicity_index,metallicity_index),dtype = expdtype2,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_delay'] += z['2500']
y['32_%s' %(which_sn_model_to_use)] += z['3200']
y['60_delay'] += z['6000']
else:
z = np.genfromtxt(localpath +'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_winds.txt' %(metallicity_index,metallicity_index),dtype = tdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y = np.genfromtxt(localpath + 'input/yields/NuGrid_AGB_SNII_2013/set1p%d/element_table_set1.%d_yields_exp.txt' %(metallicity_index,metallicity_index),dtype = expdtype,names = None,skip_header = 3, delimiter = '&', autostrip = True)
y['15_%s' %(which_sn_model_to_use)] += z['1500']
y['20_%s' %(which_sn_model_to_use)] += z['2000']
y['25_%s' %(which_sn_model_to_use)] += z['2500']
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(y['element1']):
element_list2.append(item.decode('utf8'))
y = rcfuncs.append_fields(y,'element',element_list2,usemask = False)
yield_tables[self.metallicities[i]] = y
self.elements = list(yield_tables[0.02]['element'])
self.masses = np.array((15,20,25,32,60))
######
### restructuring the tables such that it looks like the sn2 dictionary: basic_agb[metallicicty][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables[metallicity]
final_mass_name_tag = 'mass_in_remnants'
additional_keys = ['Mass',final_mass_name_tag]
names = additional_keys + self.elements
if metallicity == 0.02:
base = np.zeros(len(self.masses))
else:
base = np.zeros(len(self.masses)-2)
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
if metallicity == 0.02:
yield_tables_final_structure_subtable['Mass'] = self.masses
else:
yield_tables_final_structure_subtable['Mass'] = self.masses[:-2]
for i,item in enumerate(self.elements):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
if metallicity == 0.02:
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==item)]
temp1 = np.zeros(5)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_delay']
temp1[3] = line_of_one_element['32_%s' %(which_sn_model_to_use)]
temp1[4] = line_of_one_element['60_delay']
yield_tables_final_structure_subtable[item] = np.divide(temp1,self.masses)
else:
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==item)]
temp1 = np.zeros(3)
temp1[0] = line_of_one_element['15_%s' %(which_sn_model_to_use)]
temp1[1] = line_of_one_element['20_%s' %(which_sn_model_to_use)]
temp1[2] = line_of_one_element['25_%s' %(which_sn_model_to_use)]
yield_tables_final_structure_subtable[item] = np.divide(temp1,self.masses[:-2])
			# remnant mass fraction = 1 minus the summed (normalised) element yields, per mass bin
			for row in range(len(yield_tables_final_structure_subtable)):
				yield_tables_final_structure_subtable[final_mass_name_tag][row] = 1 - sum(yield_tables_final_structure_subtable[self.elements][row])
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def one_parameter(self, elements, element_fractions):
"""
This function was introduced in order to find best-fit yield sets where each element has just a single yield (no metallicity or mass dependence).
	One potential problem is that SN2 feedback has a large Neon fraction (~0.01); the next missing element is Argon, but that contributes only 0.05%. This might slightly spoil the metallicity derivation.
	Another problem: He and the remnant mass fraction are not constrained by the APOGEE data. Maybe these can be constrained externally by yield sets, the cosmic abundance standard, or solar abundances.
"""
self.metallicities = [0.01]
self.masses = np.array([10])
self.elements = elements
		### restructuring the tables such that they look like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_table = np.core.records.fromarrays(list_of_arrays,names=names)
yield_table['Mass'] = self.masses
yield_table['mass_in_remnants'] = 0.1
yield_table['unprocessed_mass_in_winds'] = 1 - yield_table['mass_in_remnants']
for i,item in enumerate(self.elements[1:]):
yield_table[item] = element_fractions[i+1]
yield_table['H'] = -sum(element_fractions[1:])
yield_tables_final_structure[self.metallicities[0]] = yield_table
self.table = yield_tables_final_structure
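	# Example (hypothetical usage; 'feedback' stands for an instance of the
	# enclosing feedback class):
	#   feedback.one_parameter(['H', 'O', 'Fe'], [0.7, 0.01, 0.001])
	#   feedback.table[0.01]['O']   # -> 0.01 for the single 10 Msun mass bin
	#   feedback.table[0.01]['H']   # -> -(0.01 + 0.001), balancing the other yields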
def Nomoto2013(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import numpy.lib.recfunctions as rcfuncs
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((13,15,18,20,25,30,40))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
		### restructuring the tables such that they look like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yields_for_one_metallicity = yield_tables_dict[metallicity]
# For python 3 the bytes need to be changed into strings
element_list2 = []
for j,item in enumerate(yields_for_one_metallicity['M']):
element_list2.append(item.decode('utf8'))
yields_for_one_metallicity = rcfuncs.append_fields(yields_for_one_metallicity,'element',element_list2,usemask = False)
additional_keys = ['Mass','mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses
#yield_tables_final_structure_subtable['mass_in_remnants'] = yields_for_one_metallicity['M']
			# columns 21..27 of the first row hold the remnant masses for the 7 initial masses
			temp1 = np.zeros(len(self.masses))
			for k in range(len(self.masses)):
				temp1[k] = yields_for_one_metallicity[0][21 + k]
			yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(temp1,self.masses)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = 0
for j,jtem in enumerate(indexing[item]):
################### here we can change the yield that we need for processing. normalising 'ejected_mass' with the initial mass to get relative masses
line_of_one_element = yields_for_one_metallicity[np.where(yields_for_one_metallicity['element']==jtem)][0]
					# columns 21..27 hold the yields for the 7 initial masses
					temp1 = np.zeros(len(self.masses))
					for k in range(len(self.masses)):
						temp1[k] = line_of_one_element[21 + k]
					yield_tables_final_structure_subtable[item] += np.divide(temp1,self.masses)
			# 'mass_in_remnants' is already normalised by the initial mass, hence the plain subtraction
			for row in range(len(self.masses)):
				yield_tables_final_structure_subtable['unprocessed_mass_in_winds'][row] = 1 - yield_tables_final_structure_subtable['mass_in_remnants'][row] - sum(yield_tables_final_structure_subtable[self.elements][row])
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable#[::-1]
self.table = yield_tables_final_structure
def Nomoto2013_net(self):
'''
Nomoto2013 sn2 yields from 13Msun onwards
'''
import numpy.lib.recfunctions as rcfuncs
dt = np.dtype('a13,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8')
yield_tables = {}
self.metallicities = [0.0500,0.0200,0.0080,0.0040,0.0010]
self.masses = np.array((13,15,18,20,25,30,40))
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=0.0200.dat',dtype=dt,names = True)
yield_tables_dict = {}
for item in self.metallicities:
z = np.genfromtxt(localpath + 'input/yields/Nomoto2013/nomoto_2013_z=%.4f.dat' %(item),dtype=dt,names = True)
yield_tables_dict[item]=z
hydrogen_list = ['H__1','H__2']
helium_list = ['He_3','He_4']
lithium_list = ['Li_6','Li_7']
berillium_list = ['Be_9']
boron_list = ['B_10','B_11']
carbon_list = ['C_12','C_13']
nitrogen_list = ['N_14','N_15']
oxygen_list = ['O_16','O_17','O_18']
fluorin_list = ['F_19']
neon_list = ['Ne20','Ne21','Ne22']
sodium_list = ['Na23']
magnesium_list = ['Mg24','Mg25','Mg26']
aluminium_list = ['Al27']
silicon_list = ['Si28','Si29','Si30']
phosphorus_list = ['P_31']
sulfur_list = ['S_32','S_33','S_34','S_36']
chlorine_list = ['Cl35','Cl37']
argon_list = ['Ar36','Ar38','Ar40']
potassium_list = ['K_39','K_41']
calcium_list = ['K_40','Ca40','Ca42','Ca43','Ca44','Ca46','Ca48']
scandium_list = ['Sc45']
titanium_list = ['Ti46','Ti47','Ti48','Ti49','Ti50']
vanadium_list = ['V_50','V_51']
chromium_list = ['Cr50','Cr52','Cr53','Cr54']
manganese_list = ['Mn55']
iron_list = ['Fe54', 'Fe56','Fe57','Fe58']
cobalt_list = ['Co59']
nickel_list = ['Ni58','Ni60','Ni61','Ni62','Ni64']
copper_list = ['Cu63','Cu65']
zinc_list = ['Zn64','Zn66','Zn67','Zn68','Zn70']
gallium_list = ['Ga69','Ga71']
germanium_list = ['Ge70','Ge72','Ge73','Ge74']
indexing = {}
indexing['H'] = hydrogen_list
indexing['He'] = helium_list
indexing['Li'] = lithium_list
indexing['Be'] = berillium_list
indexing['B'] = boron_list
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
indexing['Ga'] = gallium_list
indexing['Ge'] = germanium_list
self.elements = list(indexing.keys())
		### restructuring the tables such that they look like the sn2 dictionary: basic_agb[metallicity][element]
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(self.metallicities):
yield_tables_final_structure[metallicity] = np.load(localpath + 'input/yields/Nomoto2013/nomoto_net_met_ind_%d.npy' %(metallicity_index))
self.table = yield_tables_final_structure
#######################
class AGB_feedback(object):
def __init__(self):
"""
	This is the object that holds the feedback table for AGB stars.
	The different methods load different yield tables from the literature; the tables live in the input/yields/ folder.
"""
def Ventura(self):
"""
Ventura 2013 net yields from Paolo himself
"""
self.metallicities = [0.04,0.018,0.008,0.004,0.001,0.0003]
x = np.genfromtxt(localpath + 'input/yields/Ventura2013/0.018.txt',names=True)
self.masses = x['Mass']
self.elements = ['H', 'He', 'Li','C','N','O','F','Ne','Na','Mg','Al','Si']
###
yield_tables_final_structure = {}
for metallicity in self.metallicities:
x = np.genfromtxt(localpath + 'input/yields/Ventura2013/%s.txt' %(str(metallicity)),names=True)
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(x['Mass']))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = x['Mass']
			yield_tables_final_structure_subtable['mass_in_remnants'] = np.divide(x['mass_in_remnants'],x['Mass'])
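			# Hypothetical completion -- the source is truncated at this point.
			# Following the pattern of the other loaders in this module, the
			# remaining per-element yields would be normalised by the initial
			# mass and the subtable stored per metallicity, e.g.:
			#   for element in self.elements:
			#       yield_tables_final_structure_subtable[element] = np.divide(x[element], x['Mass'])
			#   yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
			# followed by `self.table = yield_tables_final_structure` after the loop.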
import os.path as osp
import cv2
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from ..registry import PIPELINES
@PIPELINES.register_module
class LoadImageFromFile(object):
def __init__(self, to_float32=False, color_type='color'):
self.to_float32 = to_float32
self.color_type = color_type
def __call__(self, results):
if results['img_prefix'] is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img = mmcv.imread(filename, self.color_type)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def __repr__(self):
return '{} (to_float32={}, color_type={})'.format(
self.__class__.__name__, self.to_float32, self.color_type)
@PIPELINES.register_module
class LoadAnnotations(object):
def __init__(self,
with_bbox=True,
with_label=True,
with_mask=False,
with_seg=False,
with_parsing=False,
RLE2parsing=True,
poly2mask=True):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.with_parsing = with_parsing
self.RLE2parsing = RLE2parsing
def _load_bboxes(self, results):
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes']
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes_ignore')
results['bbox_fields'].append('gt_bboxes')
return results
def _load_labels(self, results):
results['gt_labels'] = results['ann_info']['labels']
return results
def _poly2mask(self, mask_ann, img_h, img_w):
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
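    # Example (hypothetical): decoding a polygon annotation into a binary mask.
    #   ann = [[10.0, 10.0, 40.0, 10.0, 40.0, 40.0, 10.0, 40.0]]  # one square part
    #   mask = self._poly2mask(ann, img_h=64, img_w=64)  # -> (64, 64) uint8 array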
def _dp_mask_to_mask(self, polys):
        semantic_mask = np.zeros((256, 256), dtype=np.uint8)
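        # Hypothetical completion -- the source is truncated here. A common
        # DensePose-style implementation decodes each of the 14 part RLEs and
        # writes its 1-based part label into the semantic mask:
        #   for i in range(1, 15):
        #       if polys[i - 1]:
        #           current_mask = maskUtils.decode(polys[i - 1])
        #           semantic_mask[current_mask > 0] = i
        #   return semantic_mask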
# Setup
from __future__ import print_function
from rh_logger.api import logger
import rh_logger
import logging
import os
import numpy as np
import time
import sys
from scipy.spatial import distance
from scipy import spatial
import cv2
import argparse
from mb_aligner.common import utils
from rh_renderer import models
from mb_aligner.alignment.fine_matchers import PMCC_filter
import multiprocessing as mp
from rh_renderer.tilespec_affine_renderer import TilespecAffineRenderer
import threading
from scipy.spatial import cKDTree as KDTree
from collections import defaultdict
# import pyximport
# pyximport.install()
# from ..common import cv_wrap_module
threadLocal = threading.local()
class BlockMatcherPMCCDispatcher(object):
class BlockMatcherPMCC(object):
def __init__(self, sec1, sec2, sec1_to_sec2_transform, **kwargs):
self._scaling = kwargs.get("scaling", 0.2)
self._template_size = kwargs.get("template_size", 200)
self._search_window_size = kwargs.get("search_window_size", 8 * self._template_size)
logger.report_event("Actual template size: {} and window search size: {} (after scaling)".format(self._template_size * self._scaling, self._search_window_size * self._scaling), log_level=logging.INFO)
# Parameters for PMCC filtering
self._min_corr = kwargs.get("min_correlation", 0.2)
self._max_curvature = kwargs.get("maximal_curvature_ratio", 10)
self._max_rod = kwargs.get("maximal_ROD", 0.9)
self._use_clahe = kwargs.get("use_clahe", False)
if self._use_clahe:
self._clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
#self._debug_dir = kwargs.get("debug_dir", None)
self._debug_save_matches = None
self._template_scaled_side = self._template_size * self._scaling / 2
self._search_window_scaled_side = self._search_window_size * self._scaling / 2
self._sec1 = sec1
self._sec2 = sec2
self._sec1_to_sec2_transform = sec1_to_sec2_transform
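            # 2x3 affine matrix that uniformly scales x and y by self._scaling
            # (no translation component); it is appended to both sections' renderers below.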
self._scale_transformation = np.array([
[ self._scaling, 0., 0. ],
[ 0., self._scaling, 0. ]
])
# For section1 there will be a single renderer with transformation and scaling
self._sec1_scaled_renderer = TilespecAffineRenderer(self._sec1.tilespec)
self._sec1_scaled_renderer.add_transformation(self._sec1_to_sec2_transform.get_matrix())
self._sec1_scaled_renderer.add_transformation(self._scale_transformation)
# for section2 there will only be a single renderer (no need to transform back to sec1)
self._sec2_scaled_renderer = TilespecAffineRenderer(self._sec2.tilespec)
self._sec2_scaled_renderer.add_transformation(self._scale_transformation)
def set_debug_dir(self, debug_dir):
self._debug_save_matches = True
self._debug_dir = debug_dir
def match_sec1_to_sec2_mfov(self, sec1_pts):
# Apply the mfov transformation to compute estimated location on sec2
sec1_mfov_pts_on_sec2 = self._sec1_to_sec2_transform.apply(np.atleast_2d(sec1_pts)) * self._scaling
valid_matches = [[], [], []]
invalid_matches = [[], []]
for sec1_pt, sec2_pt_estimated in zip(sec1_pts, sec1_mfov_pts_on_sec2):
# Fetch the template around img1_point (after transformation)
from_x1, from_y1 = sec2_pt_estimated - self._template_scaled_side
to_x1, to_y1 = sec2_pt_estimated + self._template_scaled_side
sec1_template, sec1_template_start_point = self._sec1_scaled_renderer.crop(from_x1, from_y1, to_x1, to_y1)
# Fetch a large sub-image around img2_point (using search_window_scaled_size)
from_x2, from_y2 = sec2_pt_estimated - self._search_window_scaled_side
to_x2, to_y2 = sec2_pt_estimated + self._search_window_scaled_side
sec2_search_window, sec2_search_window_start_point = self._sec2_scaled_renderer.crop(from_x2, from_y2, to_x2, to_y2)
# execute the PMCC match
# Do template matching
if np.any(np.array(sec2_search_window.shape) == 0) or np.any(np.array(sec1_template.shape) == 0):
continue
if sec1_template.shape[0] >= sec2_search_window.shape[0] or sec1_template.shape[1] >= sec2_search_window.shape[1]:
continue
if self._use_clahe:
sec2_search_window_clahe = self._clahe.apply(sec2_search_window)
sec1_template_clahe = self._clahe.apply(sec1_template)
pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec2_search_window_clahe, sec1_template_clahe, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
else:
pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec2_search_window, sec1_template, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
if pmcc_result is None:
invalid_matches[0].append(sec1_pt)
invalid_matches[1].append(reason)
# debug_out_fname1 = "temp_debug/debug_match_sec1{}-{}_template.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt_estimated[0]), int(sec2_pt_estimated[1]))
# debug_out_fname2 = "temp_debug/debug_match_sec1{}-{}_search_window.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt_estimated[0]), int(sec2_pt_estimated[1]))
# cv2.imwrite(debug_out_fname1, sec1_template)
# cv2.imwrite(debug_out_fname2, sec2_search_window)
else:
# Compute the location of the matched point on img2 in non-scaled coordinates
matched_location_scaled = np.array([reason[1], reason[0]]) + np.array([from_x2, from_y2]) + self._template_scaled_side
sec2_pt = matched_location_scaled / self._scaling
logger.report_event("{}: match found: {} and {} (orig assumption: {})".format(os.getpid(), sec1_pt, sec2_pt, sec2_pt_estimated / self._scaling), log_level=logging.DEBUG)
if self._debug_save_matches:
debug_out_fname1 = os.path.join(self._debug_dir, "debug_match_sec1_{}-{}_sec2_{}-{}_image1.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt[0]), int(sec2_pt[1])))
debug_out_fname2 = os.path.join(self._debug_dir, "debug_match_sec1_{}-{}_sec2_{}-{}_image2.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt[0]), int(sec2_pt[1])))
cv2.imwrite(debug_out_fname1, sec1_template)
sec2_cut_out = sec2_search_window[int(reason[0]):int(reason[0] + 2 * self._template_scaled_side), int(reason[1]):int(reason[1] + 2 * self._template_scaled_side)]
cv2.imwrite(debug_out_fname2, sec2_cut_out)
valid_matches[0].append(np.array(sec1_pt))
valid_matches[1].append(sec2_pt)
valid_matches[2].append(match_val)
return valid_matches, invalid_matches
def match_sec2_to_sec1_mfov(self, sec2_pts):
# Assume that only sec1 renderer was transformed and not sec2 (and both scaled)
sec2_pts = np.asarray(sec2_pts)
sec2_pts_scaled = sec2_pts * self._scaling
mat = self._sec1_to_sec2_transform.get_matrix()
inverse_mat = np.linalg.inv(mat)
#inverse_model = BlockMatcherPMCC.inverse_transform(self._sec1_to_sec2_transform)
#sec2_pts_on_sec1 = inverse_model.apply(sec2_pts)
valid_matches = [[], [], []]
invalid_matches = [[], []]
for sec2_pt, sec2_pt_scaled in zip(sec2_pts, sec2_pts_scaled):
# sec1_pt_estimated is after the sec1_to_sec2 transform
sec1_pt_estimated = sec2_pt_scaled
# Fetch the template around sec2_pt_scaled (no transformation, just scaling)
from_x2, from_y2 = sec2_pt_scaled - self._template_scaled_side
to_x2, to_y2 = sec2_pt_scaled + self._template_scaled_side
sec2_template, sec2_template_start_point = self._sec2_scaled_renderer.crop(from_x2, from_y2, to_x2, to_y2)
# Fetch a large sub-image around sec1_pt_estimated (after transformation, using search_window_scaled_size)
from_x1, from_y1 = sec1_pt_estimated - self._search_window_scaled_side
to_x1, to_y1 = sec1_pt_estimated + self._search_window_scaled_side
sec1_search_window, sec1_search_window_start_point = self._sec1_scaled_renderer.crop(from_x1, from_y1, to_x1, to_y1)
# execute the PMCC match
# Do template matching
if np.any(np.array(sec1_search_window.shape) == 0) or np.any(np.array(sec2_template.shape) == 0):
continue
if sec2_template.shape[0] >= sec1_search_window.shape[0] or sec2_template.shape[1] >= sec1_search_window.shape[1]:
continue
if self._use_clahe:
sec1_search_window_clahe = self._clahe.apply(sec1_search_window)
sec2_template_clahe = self._clahe.apply(sec2_template)
pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec1_search_window_clahe, sec2_template_clahe, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
else:
pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec1_search_window, sec2_template, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
if pmcc_result is None:
invalid_matches[0].append(sec2_pt)
invalid_matches[1].append(reason)
# debug_out_fname1 = "temp_debug/debug_match_sec2{}-{}_template.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt_estimated[0]), int(sec1_pt_estimated[1]))
# debug_out_fname2 = "temp_debug/debug_match_sec2{}-{}_search_window.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt_estimated[0]), int(sec1_pt_estimated[1]))
# cv2.imwrite(debug_out_fname1, sec2_template)
# cv2.imwrite(debug_out_fname2, sec1_search_window)
else:
# Compute the location of the matched point on img2 in non-scaled coordinates
matched_location_scaled = np.array([reason[1], reason[0]]) + np.array([from_x1, from_y1]) + self._template_scaled_side
sec1_pt = matched_location_scaled / self._scaling
sec1_pt = np.dot(inverse_mat[:2,:2], sec1_pt) + inverse_mat[:2,2]
logger.report_event("{}: match found: {} and {} (orig assumption: {})".format(os.getpid(), sec2_pt, sec1_pt, np.dot(inverse_mat[:2,:2], sec1_pt_estimated / self._scaling) + inverse_mat[:2,2]), log_level=logging.DEBUG)
if self._debug_save_matches:
debug_out_fname1 = os.path.join(self._debug_dir, "debug_match_sec2_{}-{}_sec1_{}-{}_image1.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt[0]), int(sec1_pt[1])))
debug_out_fname2 = os.path.join(self._debug_dir, "debug_match_sec2_{}-{}_sec1_{}-{}_image2.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt[0]), int(sec1_pt[1])))
cv2.imwrite(debug_out_fname1, sec2_template)
sec1_cut_out = sec1_search_window[int(reason[0]):int(reason[0] + 2 * self._template_scaled_side), int(reason[1]):int(reason[1] + 2 * self._template_scaled_side)]
cv2.imwrite(debug_out_fname2, sec1_cut_out)
valid_matches[0].append(sec2_pt)
valid_matches[1].append(sec1_pt)
valid_matches[2].append(match_val)
return valid_matches, invalid_matches
def __init__(self, **kwargs):
self._matcher_kwargs = kwargs
self._mesh_spacing = kwargs.get("mesh_spacing", 1500)
# self._scaling = kwargs.get("scaling", 0.2)
# self._template_size = kwargs.get("template_size", 200)
# self._search_window_size = kwargs.get("search_window_size", 8 * template_size)
# logger.report_event("Actual template size: {} and window search size: {} (after scaling)".format(template_size * scaling, search_window_size * scaling), log_level=logging.INFO)
#
# # Parameters for PMCC filtering
# self._min_corr = kwargs.get("min_correlation", 0.2)
# self._max_curvature = kwargs.get("maximal_curvature_ratio", 10)
# self._max_rod = kwargs.get("maximal_ROD", 0.9)
# self._use_clahe = kwargs.get("use_clahe", False)
self._debug_dir = kwargs.get("debug_dir", None)
if self._debug_dir is not None:
logger.report_event("Debug mode - on", log_level=logging.INFO)
# Create a debug directory
import datetime
self._debug_dir = os.path.join(self._debug_dir, 'debug_matches_{}'.format(datetime.datetime.now().isoformat()))
            os.makedirs(self._debug_dir)
@staticmethod
def _is_point_in_img(img_bbox, point):
"""Returns True if the given point lies inside the image as denoted by the given tile_tilespec"""
# TODO - instead of checking inside the bbox, need to check inside the polygon after transformation
if point[0] > img_bbox[0] and point[1] > img_bbox[2] and \
point[0] < img_bbox[1] and point[1] < img_bbox[3]:
return True
return False
@staticmethod
def sum_invalid_matches(invalid_matches):
if len(invalid_matches[1]) == 0:
return [0] * 5
hist, _ = np.histogram(invalid_matches[1], bins=5)
return hist
@staticmethod
def _perform_matching(sec1_mfov_tile_idx, sec1, sec2, sec1_to_sec2_mfov_transform, sec1_mfov_mesh_pts, sec2_mfov_mesh_pts, debug_dir, matcher_args):
# fine_matcher_key = "block_matcher_{},{},{}".format(sec1.canonical_section_name, sec2.canonical_section_name, sec1_mfov_tile_idx[0])
# fine_matcher = getattr(threadLocal, fine_matcher_key, None)
# if fine_matcher is None:
# fine_matcher = BlockMatcherPMCCDispatcher.BlockMatcherPMCC(sec1, sec2, sec1_to_sec2_mfov_transform, **matcher_args)
# if debug_dir is not None:
# fine_matcher.set_debug_dir(debug_dir)
#
# setattr(threadLocal, fine_matcher_key, fine_matcher)
fine_matcher = BlockMatcherPMCCDispatcher.BlockMatcherPMCC(sec1, sec2, sec1_to_sec2_mfov_transform, **matcher_args)
if debug_dir is not None:
fine_matcher.set_debug_dir(debug_dir)
logger.report_event("Block-Matching+PMCC layers: {} with {} (mfov1 {}) {} mesh points1, {} mesh points2".format(sec1.canonical_section_name, sec2.canonical_section_name, sec1_mfov_tile_idx, len(sec1_mfov_mesh_pts), len(sec2_mfov_mesh_pts)), log_level=logging.INFO)
logger.report_event("Block-Matching+PMCC layers: {} -> {}".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
valid_matches1, invalid_matches1 = fine_matcher.match_sec1_to_sec2_mfov(sec1_mfov_mesh_pts)
logger.report_event("Block-Matching+PMCC layers: {} -> {} valid matches: {}, invalid_matches: {} {}".format(sec1.canonical_section_name, sec2.canonical_section_name, len(valid_matches1[0]), len(invalid_matches1[0]), BlockMatcherPMCCDispatcher.sum_invalid_matches(invalid_matches1)), log_level=logging.INFO)
logger.report_event("Block-Matching+PMCC layers: {} <- {}".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
valid_matches2, invalid_matches2 = fine_matcher.match_sec2_to_sec1_mfov(sec2_mfov_mesh_pts)
logger.report_event("Block-Matching+PMCC layers: {} <- {} valid matches: {}, invalid_matches: {} {}".format(sec1.canonical_section_name, sec2.canonical_section_name, len(valid_matches2[0]), len(invalid_matches2[0]), BlockMatcherPMCCDispatcher.sum_invalid_matches(invalid_matches2)), log_level=logging.INFO)
return sec1_mfov_tile_idx, valid_matches1, valid_matches2
# def inverse_transform(model):
# mat = model.get_matrix()
# new_model = models.AffineModel(np.linalg.inv(mat))
# return new_model
def match_layers_fine_matching(self, sec1, sec2, sec1_cache, sec2_cache, sec1_to_sec2_mfovs_transforms, pool):
starttime = time.time()
logger.report_event("Block-Matching+PMCC layers: {} with {} (bidirectional)".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
# take just the models (w/o the filtered match points)
sec1_to_sec2_mfovs_transforms = {k:v[0] for k, v in sec1_to_sec2_mfovs_transforms.items()}
# create a processes shared per-mfov transform from sec1 to sec2 (and from sec2 to sec1 too)
mfovs1_centers_sec2centers = [[], [], []] # lists of mfovs indexes, mfovs centers, and mfovs centers after transformation to sec2
missing_mfovs1_transforms_centers = [[], []] # lists of missing mfovs in sec1 and their centers
for mfov1 in sec1.mfovs():
mfov1_center = np.array([(mfov1.bbox[0] + mfov1.bbox[1])/2, (mfov1.bbox[2] + mfov1.bbox[3])/2])
if mfov1.mfov_index in sec1_to_sec2_mfovs_transforms and sec1_to_sec2_mfovs_transforms[mfov1.mfov_index] is not None:
mfovs1_centers_sec2centers[0].append(mfov1.mfov_index)
mfovs1_centers_sec2centers[1].append(mfov1_center)
sec1_mfov_model = sec1_to_sec2_mfovs_transforms[mfov1.mfov_index]
mfovs1_centers_sec2centers[2].append(sec1_mfov_model.apply(mfov1_center)[0])
else:
missing_mfovs1_transforms_centers[0].append(mfov1.mfov_index)
missing_mfovs1_transforms_centers[1].append(mfov1_center)
# # find the transformations from sec2 to sec1
# mfovs1_centers_sec2centers = [np.array(mfovs1_centers_sec2centers[0]), np.array(mfovs1_centers_sec2centers[1]), np.array(mfovs1_centers_sec2centers[2])]
# mfovs1_centers_sec2_kdtree = KDTree(mfovs1_centers_sec2centers[2])
# mfovs2_centers = [np.array([(mfov2.bbox[0] + mfov2.bbox[1])/2, (mfov2.bbox[2] + mfov2.bbox[3])/2]) for mfov2 in sec2.mfovs()]
# mfovs2_closest_centers_mfovs1_idxs = mfovs1_centers_sec2_kdtree.query(mfovs2_centers)[1]
# sec2_to_sec1_mfovs_transforms = {mfov2.mfov_index:
# inverse_transform(
# sec1_to_sec2_mfovs_transforms[
# mfovs1_centers_sec2centers[0][mfovs2_closest_centers_mfovs1_idxs[i]]
# ]
# )
# for i, mfov2 in enumerate(sec2.mfovs())}
# estimate the transformation for mfovs in sec1 that do not have one (look at closest neighbor)
if len(missing_mfovs1_transforms_centers[0]) > 0:
mfovs1_centers_sec1_kdtree = KDTree(mfovs1_centers_sec2centers[1])
mfovs1_missing_closest_centers_mfovs1_idxs = mfovs1_centers_sec1_kdtree.query(missing_mfovs1_transforms_centers[1])[1]
missing_mfovs1_sec2_centers = []
for i, (mfov1_index, mfov1_closest_mfov_idx) in enumerate(zip(missing_mfovs1_transforms_centers[0], mfovs1_missing_closest_centers_mfovs1_idxs)):
model = sec1_to_sec2_mfovs_transforms[
mfovs1_centers_sec2centers[0][mfov1_closest_mfov_idx]
]
sec1_to_sec2_mfovs_transforms[mfov1_index] = model
missing_mfovs1_sec2_centers.append(model.apply(np.atleast_2d(missing_mfovs1_transforms_centers[1][i]))[0])
# update the mfovs1_centers_sec2centers lists to include the missing mfovs and their corresponding values
mfovs1_centers_sec2centers[0] = np.concatenate((mfovs1_centers_sec2centers[0], missing_mfovs1_transforms_centers[0]))
mfovs1_centers_sec2centers[1] = np.concatenate((mfovs1_centers_sec2centers[1], missing_mfovs1_transforms_centers[1]))
mfovs1_centers_sec2centers[2] = np.concatenate((mfovs1_centers_sec2centers[2], missing_mfovs1_sec2_centers))
# Lay a grid on top of each section
sec1_mesh_pts = utils.generate_hexagonal_grid(sec1.bbox, self._mesh_spacing)
sec2_mesh_pts = utils.generate_hexagonal_grid(sec2.bbox, self._mesh_spacing)
sec1_tiles_centers = [
[(t.bbox[0] + t.bbox[1])/2, (t.bbox[2] + t.bbox[3])/2]
for t in sec1.tiles()]
sec1_tiles_centers_kdtree = KDTree(sec1_tiles_centers)
sec1_tiles_mfov_tile_idxs = np.array([[t.mfov_index, t.tile_index] for t in sec1.tiles()])
sec2_tiles_centers = [
[(t.bbox[0] + t.bbox[1])/2, (t.bbox[2] + t.bbox[3])/2]
for t in sec2.tiles()]
sec2_tiles_centers_kdtree = KDTree(sec2_tiles_centers)
sec2_tiles_mfov_tile_idxs = np.array([[t.mfov_index, t.tile_index] for t in sec2.tiles()])
# TODO - split the work in a smart way between the processes
# Group the mesh points of sec1 by its mfovs_tiles and make sure the points are in tiles
sec1_mesh_pts_mfov_tile_idxs = sec1_tiles_mfov_tile_idxs[sec1_tiles_centers_kdtree.query(sec1_mesh_pts)[1]]
sec1_per_region_mesh_pts = defaultdict(list)
for sec1_pt, sec1_pt_mfov_tile_idx in zip(sec1_mesh_pts, sec1_mesh_pts_mfov_tile_idxs):
sec1_tile = sec1.get_mfov(sec1_pt_mfov_tile_idx[0]).get_tile(sec1_pt_mfov_tile_idx[1])
if BlockMatcherPMCCDispatcher._is_point_in_img(sec1_tile.bbox, sec1_pt):
sec1_per_region_mesh_pts[tuple(sec1_pt_mfov_tile_idx)].append(sec1_pt)
# Group the mesh pts of sec2 by the mfov on sec1 which they should end up on (mfov1 that after applying its transformation is closest to that point)
# Transform sec1 tiles centers to their estimated location on sec2
sec1_tiles_centers_per_mfov = defaultdict(list)
for sec1_tile_center, sec1_tiles_mfov_tile_idx in zip(sec1_tiles_centers, sec1_tiles_mfov_tile_idxs):
sec1_tiles_centers_per_mfov[sec1_tiles_mfov_tile_idx[0]].append(sec1_tile_center)
sec1_tiles_centers_on_sec2 = [
sec1_to_sec2_mfovs_transforms[mfov_index].apply(np.atleast_2d(mfov1_tiles_centers))
for mfov_index, mfov1_tiles_centers in sec1_tiles_centers_per_mfov.items()
]
sec1_tiles_centers_on_sec2 = np.vstack(tuple(sec1_tiles_centers_on_sec2))
sec1_tiles_centers_on_sec2_kdtree = KDTree(sec1_tiles_centers_on_sec2)
sec2_mesh_pts_sec1_closest_tile_idxs = sec1_tiles_centers_on_sec2_kdtree.query(sec2_mesh_pts)[1]
sec2_mesh_pts_mfov_tile_idxs = sec2_tiles_mfov_tile_idxs[sec2_tiles_centers_kdtree.query(sec2_mesh_pts)[1]]
sec2_per_region1_mesh_pts = defaultdict(list)
for sec2_pt, (sec2_pt_mfov_idx, sec2_pt_tile_idx), sec1_tile_center_idx in zip(sec2_mesh_pts, sec2_mesh_pts_mfov_tile_idxs, sec2_mesh_pts_sec1_closest_tile_idxs):
sec2_tile = sec2.get_mfov(sec2_pt_mfov_idx).get_tile(sec2_pt_tile_idx)
if BlockMatcherPMCCDispatcher._is_point_in_img(sec2_tile.bbox, sec2_pt):
sec2_per_region1_mesh_pts[tuple(sec1_tiles_mfov_tile_idxs[sec1_tile_center_idx])].append(sec2_pt)
# Activate the actual matching
sec1_to_sec2_results = [[], []]
sec2_to_sec1_results = [[], []]
pool_results = []
for region1_key, sec1_region_mesh_pts in sec1_per_region_mesh_pts.items():
sec2_mesh_pts_cur_sec1_region = sec2_per_region1_mesh_pts[region1_key]
#sec1_sec2_mfov_matches, sec2_sec1_mfov_matches = BlockMatcherPMCCDispatcher._perform_matching(sec1_mfov_index, sec1, sec2, sec1_to_sec2_mfovs_transforms[sec1_mfov_index], sec1_mfov_mesh_pts, sec2_mesh_pts_cur_sec1_mfov, self._debug_dir, **self._matcher_kwargs)
res_pool = pool.apply_async(BlockMatcherPMCCDispatcher._perform_matching, (region1_key, sec1, sec2, sec1_to_sec2_mfovs_transforms[region1_key[0]], sec1_region_mesh_pts, sec2_mesh_pts_cur_sec1_region, self._debug_dir, self._matcher_kwargs))
pool_results.append(res_pool)
for res_pool in pool_results:
sec1_region_index, sec1_sec2_region_matches, sec2_sec1_region_matches = res_pool.get()
if len(sec1_sec2_region_matches[0]) > 0:
sec1_to_sec2_results[0].append(sec1_sec2_region_matches[0])
sec1_to_sec2_results[1].append(sec1_sec2_region_matches[1])
if len(sec2_sec1_region_matches[0]) > 0:
sec2_to_sec1_results[0].append(sec2_sec1_region_matches[0])
sec2_to_sec1_results[1].append(sec2_sec1_region_matches[1])
        return np.array([np.vstack(sec1_to_sec2_results[0]), np.vstack(sec1_to_sec2_results[1])]), np.array([np.vstack(sec2_to_sec1_results[0]), np.vstack(sec2_to_sec1_results[1])])
import numpy as np
import pytest
import numpy.testing as npt
from pulse2percept import implants
@pytest.mark.parametrize('x', (-100, 200))
@pytest.mark.parametrize('y', (-200, 400))
@pytest.mark.parametrize('rot', (-45, 60))
@pytest.mark.parametrize('eye', ('LE', 'RE'))
def test_IMIE(x, y, rot, eye):
# Create an IMIE and make sure location is correct
imie = implants.IMIE(x, y, rot=rot, eye = eye)
imie0 = implants.IMIE(eye = eye)
# Slots:
npt.assert_equal(hasattr(imie, '__slots__'), True)
npt.assert_equal(hasattr(imie, '__dict__'), False)
# Check if there is 256 electrodes in the array
npt.assert_equal(len(imie.earray.electrodes), 256)
# Coordinates of electrode 'N3'
xy = np.array([imie0['N3'].x, imie0['N3'].y]).T
# Rotate
rot_rad = np.deg2rad(rot)
R = np.array([np.cos(rot_rad), -np.sin(rot_rad),
np.sin(rot_rad), np.cos(rot_rad)]).reshape((2, 2))
xy = np.matmul(R, xy)
# Then off-set: Make sure electrode N3 is placed
# correctly
npt.assert_almost_equal(imie['N3'].x, xy[0] + x)
npt.assert_almost_equal(imie['N3'].y, xy[1] + y)
# Make sure array center is still (x,y)
y_center = imie['H10'].y + (imie['G10'].y - imie['H10'].y) / 2
npt.assert_almost_equal(y_center, y)
x_center = imie['H10'].x + (imie['G10'].x - imie['H10'].x) / 2
npt.assert_almost_equal(x_center, x)
# Make sure the center to center pitch is correct
npt.assert_almost_equal((imie['L1'].x - imie['K1'].x) ** 2 +
(imie['L1'].y - imie['K1'].y) ** 2,
300**2)
npt.assert_almost_equal((imie['A3'].x - imie['A4'].x) ** 2 +
(imie['A3'].y - imie['A4'].y) ** 2,
350**2)
# Check radii of electrodes
for e in ['N16', 'N17', 'A16', 'A17', 'L1', 'K1', 'C1', 'D1']:
npt.assert_almost_equal(imie[e].r, 80.0)
for e in ['A3', 'M15', 'B19', 'C15', 'D13']:
npt.assert_almost_equal(imie[e].r, 105.0)
# `h` must have the right dimensions
with pytest.raises(ValueError):
implants.IMIE(x=-100, y=10, z=np.zeros(5))
with pytest.raises(ValueError):
implants.IMIE(x=-100, y=10, z=[1, 2, 3])
# Right-eye implant:
xc, yc = 500, -500
imie_re = implants.IMIE(eye='RE', x=xc, y=yc)
npt.assert_equal(imie_re['A4'].x > imie_re['A3'].x, True)
npt.assert_almost_equal(imie_re['A4'].y, imie_re['A3'].y)
# need to adjust for reflection about y-axis
# Left-eye implant:
imie_le = implants.IMIE(eye='LE', x=xc, y=yc)
npt.assert_equal(imie_le['A3'].x > imie_le['A4'].x, True)
npt.assert_almost_equal(imie_le['A3'].y, imie_le['A4'].y)
# In both left and right eyes, rotation with positive angle should be
# counter-clock-wise (CCW): for (x>0,y>0), decreasing x and increasing y
for eye, el in zip(['LE', 'RE'], ['L5', 'L17']):
before = implants.IMIE(eye=eye)
after = implants.IMIE(eye=eye, rot=10)
npt.assert_equal(after[el].x < before[el].x, True)
npt.assert_equal(after[el].y > before[el].y, True)
def test_IMIE_stim():
# Assign a stimulus:
implant = implants.IMIE()
implant.stim = {'A3': 1}
    npt.assert_equal(implant.stim.electrodes, ['A3'])
from pseas.new_instance_selection.new_instance_selection import NewInstanceSelection
from pseas.model import Model
import numpy as np
from typing import Callable, List
from scipy import optimize
def __compute_distance_matrix__(features: np.ndarray, distance: Callable[[np.ndarray, np.ndarray], float]) -> np.ndarray:
"""
Computes the distance matrix between the instances.
It assumes the distance function is symmetric that is d(x,y)=d(y,x) and it assumes d(x, x)=0.
Parameters:
-----------
- features (np.ndarray) - the features of the instances
- distance (Callable[[np.ndarray, np.ndarray], float]) - a function that given two features compute their distance
Return:
-----------
The distance_matrix (np.ndarray) the distance matrix.
"""
num_instances: int = features.shape[0]
distance_matrix: np.ndarray = np.zeros(
(num_instances, num_instances), dtype=np.float64)
for instance1_index in range(num_instances):
features1: np.ndarray = features[instance1_index]
for instance2_index in range(instance1_index + 1, num_instances):
d: float = distance(features1, features[instance2_index])
distance_matrix[instance2_index, instance1_index] = d
distance_matrix[instance1_index, instance2_index] = d
return distance_matrix
def __find_weights__(x: np.ndarray, y: np.ndarray, mask: np.ndarray) -> np.ndarray:
    n_total: int = x.shape[0]
    features: int = x.shape[1]
    # number of instances that survive the mask
    instances: int = n_total - int(np.sum(mask <= 0))
    qty: int = int(instances * (instances - 1) / 2)
dx: np.ndarray = np.zeros((qty, features))
dy: np.ndarray = np.zeros((qty,))
# Compute dataset
index: int = 0
    # iterate over all rows (masked ones are skipped below), so exactly qty pairs are filled
    for i in range(n_total):
if mask[i] <= 0:
continue
        for j in range(i + 1, n_total):
if mask[j] <= 0:
continue
dx[index] = x[i] - x[j]
dy[index] = y[i] - y[j]
index += 1
np.square(dx, out=dx)
np.abs(dy, out=dy)
# np.square(dy, out=dy)
# weights = argmin_w_i (norm [w_i (x_i -x'_i)]_i - |y - y'|)^2
weights, residual = optimize.nnls(dx, dy)
return np.sqrt(weights)
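# Example (hypothetical): on toy data where only the first feature carries
# signal, __find_weights__ should return a weight vector dominated by w[0]:
#   x = np.random.rand(50, 3)
#   y = 4.0 * x[:, 0]
#   w = __find_weights__(x, y, np.ones(50))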
class UDD(NewInstanceSelection):
def __init__(self, alpha: float = 1, beta: float = 1, k : int = 5) -> None:
super().__init__()
self.alpha: float = alpha
self.beta: float = beta
self.k : int = k
    def __uncertainty(self, perf_matrix: np.ndarray, selectables_instances, model: Model, challenger_configuration) -> np.ndarray:
"""
Original: Difference between max vote and max second vote for classification
Ours: variance of predictions among forest"""
uncertainty: np.ndarray = np.zeros(perf_matrix.shape[0])
for instance in selectables_instances:
_, var = model.predict(challenger_configuration, instance)
uncertainty[instance] = var
return uncertainty
def __k_nearest_neighbours(self, instance, selectables_instances, distances: np.ndarray):
d = distances[instance, :]
        # ascending order so the *nearest* instances come first (the original
        # reversed this, which returned the k farthest instances instead)
        order = np.argsort(d)
        k_best = []
        for i in order:
if i in selectables_instances and i != instance:
k_best.append(i)
if len(k_best) == self.k:
break
return k_best
def __density(self, selectables_instances, distances: np.ndarray):
densities = np.zeros(distances.shape[0], float)
for instance in selectables_instances[:]:
neighbours = self.__k_nearest_neighbours(instance, selectables_instances, distances)
total: float = 0
for neighbour in neighbours:
dist: float = distances[instance, neighbour]
total += dist*dist
total /= max(1, len(neighbours))
densities[instance] = total
return densities
def __diversity(self, selectables_instances, distances: np.ndarray):
done_mask = np.array([i not in selectables_instances for i in range(distances.shape[0])])
if np.any(done_mask):
diversities = np.min(distances[:, done_mask], axis=1)
diversities[done_mask] = 0
else:
diversities = np.zeros((len(selectables_instances)))
return diversities
def select(self, challenger_configuration: int, incumbent_configuration: int, perf_matrix: np.ndarray, perf_mask: np.ndarray, model: Model, predicted_perf_matrix: np.ndarray, instance_features: np.ndarray) -> int:
mask = np.sum(perf_mask, axis=1)
# Find optimal distance function
y = np.zeros((perf_matrix.shape[0]))
for instance in range(y.shape[0]):
if np.any(perf_mask[instance]):
times = perf_matrix[instance, perf_mask[instance]]
y[instance] = np.median(times)
weights: np.ndarray = __find_weights__(instance_features, y, mask)
distances = __compute_distance_matrix__(instance_features, lambda x1, x2: np.linalg.norm(weights * (x1 - x2)))
selectables_instances = [i for i in range(perf_matrix.shape[0]) if not np.any(perf_mask[i, :])]
uncertainties = self.__uncertainty(perf_matrix, selectables_instances, model, challenger_configuration)
# Normalize values in [0, 1]
uncertainties -= np.min(uncertainties)
uncertainties /= max(1e-3, np.max(uncertainties))
if self.alpha == 0 and self.beta == 0:
scores = uncertainties
else:
densities = self.__density(selectables_instances, distances)
diversities = self.__diversity(selectables_instances, distances)
# Normalize values in [0, 1]
densities -= np.min(densities)
diversities -= np.min(diversities)
            densities /= max(1e-3, np.max(densities))
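            diversities /= max(1e-3, np.max(diversities))
            # Hypothetical completion -- the source is truncated here. The UDD
            # heuristic combines the three normalised criteria with the alpha
            # and beta trade-off weights:
            scores = uncertainties + self.alpha * densities + self.beta * diversities
        # pick the highest-scoring instance that can still be selected
        return max(selectables_instances, key=lambda i: scores[i])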
# Copyright 2016 Sandia Corporation and the National Renewable Energy
# Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The Extreme Sea State Contour (ESSC) module contains the tools necessary to
calculate environmental contours of extreme sea states for buoy data.
'''
import numpy as np
import scipy.stats as stats
import scipy.optimize as optim
import scipy.interpolate as interp
import matplotlib.pyplot as plt
import h5py
from sklearn.decomposition import PCA as skPCA
import requests
import bs4
import urllib.request
import re
from datetime import datetime, date
import os
import glob
import copy
import statsmodels.api as sm
from statsmodels import robust
import urllib
import matplotlib
class EA:
'''The Environmental Assessment (EA) class points to functions for
various contour methods (including getContours and getSamples) and allows
the user to plot results (plotData), sample along the contour
(getContourPoints), calculate the wave breaking steepness curve (steepness)
and/or use the bootstrap method to calculate 95% confidence bounds about
the contours (bootStrap).'''
    def __init__(self):
        return
    def getContours(self):
        '''Points to the getContours function in whatever contouring method is used'''
        return
    def getSamples(self):
        '''Points to the getSamples function in whatever contouring method is
        used, currently only implemented for PCA contours. Implementation for
        additional contour methods planned for future release.'''
        return
def saveContour(self, fileName=None):
'''Saves all available contour data obtained via the EA module to
a .h5 file
Parameters
----------
fileName : string
relevent path and filename where the .h5 file will be created and
saved. If no filename, the h5 file will be named NDBC(buoyNum).h5
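        Example
        -------
        Illustrative usage (following the pattern of the other examples in
        this module):
            import WDRT.ESSC as ESSC
            # Pull spectral data from NDBC website
            buoy46022 = ESSC.Buoy('46022','NDBC')
            buoy46022.fetchFromWeb()
            # Create PCA EA object for buoy
            pca46022 = ESSC.PCA(buoy46022)
            # Generate contour
            Hs_Return, T_Return = pca46022.getContours(1., 100)
            # Save the contour data to NDBC46022.h5
            pca46022.saveContour('NDBC46022')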
'''
if (fileName is None):
fileName = 'NDBC' + str(self.buoy.buoyNum) + '.h5'
else:
_, file_extension = os.path.splitext(fileName)
if not file_extension:
fileName = fileName + '.h5'
        print(fileName)
with h5py.File(fileName, 'a') as f:
if('method' in f):
f['method'][...] = self.method
else:
f.create_dataset('method', data=self.method)
if('parameters' in f):
gp = f['parameters']
else:
gp = f.create_group('parameters')
self._saveParams(gp)
if(self.Hs_ReturnContours is not None):
if('ReturnContours' in f):
grc = f['ReturnContours']
else:
grc = f.create_group('ReturnContours')
if('T_Return' in grc):
f_T_Return = grc['T_Return']
f_T_Return[...] = self.T_ReturnContours
else:
f_T_Return = grc.create_dataset('T_Return', data=self.T_ReturnContours)
f_T_Return.attrs['units'] = 's'
f_T_Return.attrs['description'] = 'contour, energy period'
if('Hs_Return' in grc):
f_Hs_Return = grc['Hs_Return']
f_Hs_Return[...] = self.Hs_ReturnContours
else:
f_Hs_Return = grc.create_dataset('Hs_Return', data=self.Hs_ReturnContours)
f_Hs_Return.attrs['units'] = 'm'
f_Hs_Return.attrs['description'] = 'contours, significant wave height'
# Samples for full sea state long term analysis
if(hasattr(self, 'Hs_SampleFSS') and self.Hs_SampleFSS is not None):
if('Samples_FullSeaState' in f):
gfss = f['Samples_FullSeaState']
else:
gfss = f.create_group('Samples_FullSeaState')
if('Hs_SampleFSS' in gfss):
f_Hs_SampleFSS = gfss['Hs_SampleFSS']
f_Hs_SampleFSS[...] = self.Hs_SampleFSS
else:
f_Hs_SampleFSS = gfss.create_dataset('Hs_SampleFSS', data=self.Hs_SampleFSS)
f_Hs_SampleFSS.attrs['units'] = 'm'
f_Hs_SampleFSS.attrs['description'] = 'full sea state significant wave height samples'
if('T_SampleFSS' in gfss):
f_T_SampleFSS = gfss['T_SampleFSS']
f_T_SampleFSS[...] = self.T_SampleFSS
else:
f_T_SampleFSS = gfss.create_dataset('T_SampleFSS', data=self.T_SampleFSS)
f_T_SampleFSS.attrs['units'] = 's'
f_T_SampleFSS.attrs['description'] = 'full sea state energy period samples'
if('Weight_SampleFSS' in gfss):
f_Weight_SampleFSS = gfss['Weight_SampleFSS']
f_Weight_SampleFSS[...] = self.Weight_SampleFSS
else:
f_Weight_SampleFSS = gfss.create_dataset('Weight_SampleFSS', data = self.Weight_SampleFSS)
f_Weight_SampleFSS.attrs['description'] = 'full sea state relative weighting samples'
# Samples for contour approach long term analysis
if(hasattr(self, 'Hs_SampleCA') and self.Hs_SampleCA is not None):
if('Samples_ContourApproach' in f):
gca = f['Samples_ContourApproach']
else:
gca = f.create_group('Samples_ContourApproach')
if('Hs_SampleCA' in gca):
f_Hs_sampleCA = gca['Hs_SampleCA']
f_Hs_sampleCA[...] = self.Hs_SampleCA
else:
f_Hs_sampleCA = gca.create_dataset('Hs_SampleCA', data=self.Hs_SampleCA)
f_Hs_sampleCA.attrs['units'] = 'm'
f_Hs_sampleCA.attrs['description'] = 'contour approach significant wave height samples'
if('T_SampleCA' in gca):
f_T_sampleCA = gca['T_SampleCA']
f_T_sampleCA[...] = self.T_SampleCA
else:
f_T_sampleCA = gca.create_dataset('T_SampleCA', data=self.T_SampleCA)
f_T_sampleCA.attrs['units'] = 's'
f_T_sampleCA.attrs['description'] = 'contour approach energy period samples'
def plotData(self):
"""
Display a plot of the 100-year return contour, full sea state samples
and contour samples
"""
plt.figure()
plt.plot(self.buoy.T, self.buoy.Hs, 'bo', alpha=0.1, label='NDBC data')
plt.plot(self.T_ReturnContours, self.Hs_ReturnContours, 'k-', label='100 year contour')
#plt.plot(self.T_SampleFSS, self.Hs_SampleFSS, 'ro', label='full sea state samples')
#plt.plot(self.T_SampleCA, self.Hs_SampleCA, 'y^', label='contour approach samples')
plt.legend(loc='lower right', fontsize='small')
plt.grid(True)
plt.xlabel('Energy period, $T_e$ [s]')
plt.ylabel('Sig. wave height, $H_s$ [m]')
plt.show()
def getContourPoints(self, T_Sample):
'''Get Hs points along a specified environmental contour using
user-defined T values.
Parameters
----------
T_Sample : nparray
points for sampling along return contour
Returns
-------
Hs_SampleCA : nparray
points sampled along return contour
Example
-------
To calculate Hs values along the contour at specific
user-defined T values:
import WDRT.ESSC as ESSC
import numpy as np
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Generate contour
Hs_Return, T_Return = pca46022.getContours(Time_SS, Time_r,nb_steps)
# Use getContourPoints to find specific points along the contour
T_sampleCA = np.arange(12, 26, 2)
Hs_sampleCA = pca46022.getContourPoints(T_sampleCA)
'''
#finds minimum and maximum energy period values
amin = np.argmin(self.T_ReturnContours)
amax = np.argmax(self.T_ReturnContours)
#finds points along the contour
w1 = self.Hs_ReturnContours[amin:amax]
w2 = np.concatenate((self.Hs_ReturnContours[amax:], self.Hs_ReturnContours[:amin]))
if (np.max(w1) > np.max(w2)):
x1 = self.T_ReturnContours[amin:amax]
            y1 = self.Hs_ReturnContours[amin:amax]  # was 'y', which left y1 undefined in this branch
else:
x1 = np.concatenate((self.T_ReturnContours[amax:], self.T_ReturnContours[:amin]))
y1 = np.concatenate((self.Hs_ReturnContours[amax:], self.Hs_ReturnContours[:amin]))
#sorts data based on the max and min energy period values
ms = np.argsort(x1)
x = x1[ms]
y = y1[ms]
#interpolates the sorted data
si = interp.interp1d(x, y)
#finds the wave height based on the user specified energy period values
Hs_SampleCA = si(T_Sample)
self.T_SampleCA = T_Sample
self.Hs_SampleCA = Hs_SampleCA
return Hs_SampleCA
def steepness(self, SteepMax, T_vals, depth = None):
'''This function calculates a steepness curve to be plotted on an H vs. T
diagram. First, the function calculates the wavelength based on the
depth and T. The T vector can be the input data vector, or will be
created below to cover the span of possible T values.
The function solves the dispersion relation for water waves
using the Newton-Raphson method. All outputs are solved for exactly
        using: :math:`h\omega^2/g = kh\tanh(kh)`
Approximations that could be used in place of this code for deep
and shallow water, as appropriate:
deep water: :math:`h/\lambda \geq 1/2, tanh(kh) \sim 1, \lambda = (gT^2)/(2\pi)`
        shallow water: :math:`h/\lambda \leq 1/20, tanh(kh) \sim kh, \lambda = T\sqrt{gh}`
Parameters
----------
SteepMax: float
Wave breaking steepness estimate (e.g., 0.07).
T_vals :np.array
Array of T values [sec] at which to calculate the breaking height.
depth: float
Depth at site
            Note: if not provided, the depth will be fetched from the respective
            buoy type's website.
Returns
-------
SteepH: np.array
H values [m] that correspond to the T_mesh values creating the
steepness curve.
T_steep: np.array
T values [sec] over which the steepness curve is defined.
Example
-------
To find limit the steepness of waves on a contour by breaking:
import numpy as np
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
T_vals = np.arange(0.1, np.amax(buoy46022.T), 0.1)
# Enter estimate of breaking steepness
SteepMax = 0.07 # Reference DNV-RP-C205
# Declare required parameters
depth = 391.4 # Depth at measurement point (m)
SteepH = pca46022.steepness(depth,SteepMax,T_vals)
'''
# Calculate the wavelength at a given depth at each value of T
        if depth is None:
depth = self.__fetchDepth()
        lambdaT = []
        g = 9.81  # [m/s^2]
        omega = ((2 * np.pi) / T_vals)
for i in range(len(T_vals)):
# Initialize kh using Eckart 1952 (mentioned in Holthuijsen pg. 124)
kh = (omega[i]**2) * depth / \
(g * (np.tanh((omega[i]**2) * depth / g)**0.5))
# Find solution using the Newton-Raphson Method
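            # iterate kh_{n+1} = kh_n - f(kh_n)/f'(kh_n) on
            # f(kh) = (omega**2)*depth/g - kh*tanh(kh),
            # stopping once successive residuals agree to within 1e-6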
for j in range(1000):
kh0 = kh
f0 = (omega[i]**2) * depth / g - kh0 * np.tanh(kh0)
                df0 = -np.tanh(kh0) - kh0 * (1 - np.tanh(kh0)**2)
kh = -f0 / df0 + kh0
f = (omega[i]**2) * depth / g - kh * np.tanh(kh)
if abs(f0 - f) < 10**(-6):
break
lambdaT.append((2 * np.pi) / (kh / depth))
del kh, kh0
        lambdaT = np.array(lambdaT, dtype=float)
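        # breaking height follows from the steepness definition
        # H / lambda = SteepMax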
SteepH = lambdaT * SteepMax
return SteepH
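    # A minimal standalone sanity check for the dispersion solve above (a
    # sketch, not part of the original module): in deep water tanh(kh) ~ 1,
    # so lambda ~ g*T**2 / (2*pi); for example
    #   g, T = 9.81, 10.0
    #   lambda_deep = g * T**2 / (2 * np.pi)   # ~156 m for T = 10 s
    # and the Newton-Raphson wavelength should approach lambda_deep as the
    # depth grows large.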
def __fetchDepth(self):
'''Obtains the depth from the website for a buoy (either NDBC or CDIP)'''
if self.buoy.buoyType == "NDBC":
            url = "https://www.ndbc.noaa.gov/station_page.php?station=%s" % (self.buoy.buoyNum)
ndbcURL = requests.get(url)
ndbcURL.raise_for_status()
ndbcHTML = bs4.BeautifulSoup(ndbcURL.text, "lxml")
header = ndbcHTML.find("b", text="Water depth:")
return float(str(header.nextSibling).split()[0])
elif self.buoy.buoyType == "CDIP":
url = "http://cdip.ucsd.edu/cgi-bin/wnc_metadata?ARCHIVE/%sp1/%sp1_historic" % (self.buoy.buoyNum, self.buoy.buoyNum)
cdipURL = requests.get(url)
cdipURL.raise_for_status()
cdipHTML = bs4.BeautifulSoup(cdipURL.text, "lxml")
#Parse the table for the depth value
depthString = str(cdipHTML.findChildren("td", {"class" : "plus"})[0])
depthString = depthString.split("<br/>")[2]
return float(re.findall(r"[-+]?\d*\.\d+|\d+", depthString)[0])
def bootStrap(self, boot_size=1000, plotResults=True):
'''Get 95% confidence bounds about a contour using the bootstrap
method. Warning - this function is time consuming. Computation
time depends on selected boot_size.
Parameters
----------
boot_size: int (optional)
Number of bootstrap samples that will be used to calculate 95%
confidence interval. Should be large enough to calculate stable
statistics. If left blank will be set to 1000.
plotResults: boolean (optional)
Option for showing plot of bootstrap confidence bounds. If left
blank will be set to True and plot will be shown.
Returns
-------
contourmean_Hs : nparray
Hs values for mean contour calculated as the average over all
bootstrap contours.
contourmean_T : nparray
T values for mean contour calculated as the average over all
bootstrap contours.
Example
-------
        To generate 95% bootstrap contours for a given contour method:
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
pca46022 = ESSC.PCA(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
nb_steps = 1000 # Enter discretization of the circle in the normal space (optional)
# Contour generation
Hs_Return, T_Return = pca46022.getContours(Time_SS, Time_r,nb_steps)
        # Calculate bootstrap confidence interval
contourmean_Hs, contourmean_T = pca46022.bootStrap(boot_size=10)
'''
if (self.method == "Bivariate KDE, Log Transform" or
self.method == "Bivariate KDE"):
            msg = 'WDRT does not support the bootstrap method for Bivariate KDE contours (see Issue #47).'
print(msg)
return None, None
#preallocates arrays
n = len(self.buoy.Hs)
Hs_Return_Boot = np.zeros([self.nb_steps,boot_size])
T_Return_Boot = np.zeros([self.nb_steps,boot_size])
        buoycopy = copy.deepcopy(self.buoy)
#creates copies of the data based on how it was modeled.
for i in range(boot_size):
boot_inds = np.random.randint(0, high=n, size=n)
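            # draw n indices with replacement, i.e. a standard bootstrap
            # resample of the (Hs, T) observation pairs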
buoycopy.Hs = copy.deepcopy(self.buoy.Hs[boot_inds])
buoycopy.T = copy.deepcopy(self.buoy.T[boot_inds])
essccopy=None
if self.method == "Principle component analysis":
essccopy = PCA(buoycopy, self.size_bin)
elif self.method == "Gaussian Copula":
essccopy = GaussianCopula(buoycopy, self.n_size, self.bin_1_limit, self.bin_step)
elif self.method == "Rosenblatt":
essccopy = Rosenblatt(buoycopy, self.n_size, self.bin_1_limit, self.bin_step)
elif self.method == "Clayton Copula":
essccopy = ClaytonCopula(buoycopy, self.n_size, self.bin_1_limit, self.bin_step)
elif self.method == "Gumbel Copula":
essccopy = GumbelCopula(buoycopy, self.n_size, self.bin_1_limit, self.bin_step, self.Ndata)
elif self.method == "Non-parametric Gaussian Copula":
essccopy = NonParaGaussianCopula(buoycopy, self.Ndata, self.max_T, self.max_Hs)
elif self.method == "Non-parametric Clayton Copula":
essccopy = NonParaClaytonCopula(buoycopy, self.Ndata, self.max_T, self.max_Hs)
elif self.method == "Non-parametric Gumbel Copula":
essccopy = NonParaGumbelCopula(buoycopy, self.Ndata, self.max_T, self.max_Hs)
Hs_Return_Boot[:,i],T_Return_Boot[:,i] = essccopy.getContours(self.time_ss, self.time_r, self.nb_steps)
#finds 95% CI values for wave height and energy
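        # the 95% CI spans the 2.5th to 97.5th percentiles across bootstrap
        # samples, taken pointwise at each of the nb_steps contour angles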
contour97_5_Hs = np.percentile(Hs_Return_Boot,97.5,axis=1)
contour2_5_Hs = np.percentile(Hs_Return_Boot,2.5,axis=1)
contourmean_Hs = np.mean(Hs_Return_Boot, axis=1)
contour97_5_T = np.percentile(T_Return_Boot,97.5,axis=1)
contour2_5_T = np.percentile(T_Return_Boot,2.5,axis=1)
contourmean_T = np.mean(T_Return_Boot, axis=1)
self.contourMean_Hs = contourmean_Hs
self.contourMean_T = contourmean_T
#plotting function
        def _plotResults():  # leading underscore avoids shadowing the plotResults flag
plt.figure()
plt.plot(self.buoy.T, self.buoy.Hs, 'bo', alpha=0.1, label='NDBC data')
plt.plot(self.T_ReturnContours, self.Hs_ReturnContours, 'k-', label='100 year contour')
plt.plot(contour97_5_T, contour97_5_Hs, 'r--', label='95% bootstrap confidence interval')
plt.plot(contour2_5_T, contour2_5_Hs, 'r--')
plt.plot(contourmean_T, contourmean_Hs, 'r-', label='Mean bootstrap contour')
plt.legend(loc='lower right', fontsize='small')
plt.grid(True)
plt.xlabel('Energy period, $T_e$ [s]')
plt.ylabel('Sig. wave height, $H_s$ [m]')
plt.show()
        if plotResults:
            _plotResults()
return contourmean_Hs, contourmean_T
def outsidePoints(self):
'''Determines which buoy observations are outside of a given contour.
Parameters
----------
None
Returns
-------
outsideHs : nparray
The Hs values of the observations that are outside of the contour
outsideT : nparray
The T values of the observations that are outside of the contour
Example
-------
        To get corresponding T and Hs arrays of observations that are outside
of a given contour:
import WDRT.ESSC as ESSC
import numpy as np
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
rosen46022 = ESSC.Rosenblatt(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
# Generate contour
Hs_Return, T_Return = rosen46022.getContours(Time_SS, Time_r)
# Return the outside point Hs/T combinations
outsideT, outsideHs = rosen46022.outsidePoints()
'''
#checks if the contour type is a KDE contour - if so, finds the outside points for the KDE contour.
if isinstance(self.T_ReturnContours,list):
contains_test = np.zeros(len(self.buoy.T),dtype=bool)
for t,hs in zip(self.T_ReturnContours,self.Hs_ReturnContours):
                path_contour = matplotlib.path.Path(np.column_stack((t, hs)))
contains_test = contains_test+path_contour.contains_points(np.column_stack((self.buoy.T,self.buoy.Hs)))
out_inds = np.where(~contains_test)
else: # For non-KDE methods (copulas, etc.)
path_contour = matplotlib.path.Path(np.column_stack((self.T_ReturnContours,self.Hs_ReturnContours)))
contains_test = path_contour.contains_points(np.column_stack((self.buoy.T,self.buoy.Hs)))
out_inds = np.where(~contains_test)
        outsideHs = self.buoy.Hs[out_inds]
outsideT = self.buoy.T[out_inds]
return(outsideT, outsideHs)
def contourIntegrator(self):
'''Calculates the area of the contour over the two-dimensional input
space of interest.
Parameters
----------
None
Returns
-------
area : float
The area of the contour in TxHs units.
Example
-------
To obtain the area of the contour:
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
rosen46022 = ESSC.Rosenblatt(buoy46022)
# Declare required parameters
Time_SS = 1. # Sea state duration (hrs)
Time_r = 100 # Return periods (yrs) of interest
# Generate contour
Hs_Return, T_Return = rosen46022.getContours(Time_SS, Time_r)
# Return the area of the contour
rosenArea = rosen46022.contourIntegrator()
'''
contourTs = self.T_ReturnContours
contourHs = self.Hs_ReturnContours
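        # shoelace formula: area = 0.5*|sum(x_i*y_{i+1} - x_{i+1}*y_i)|,
        # vectorized with np.roll; e.g. the unit square (0,0),(1,0),(1,1),(0,1)
        # yields an area of 1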
area = 0.5*np.abs(np.dot(contourTs,np.roll(contourHs,1))-np.dot(contourHs,np.roll(contourTs,1)))
return area
def dataContour(self, tStepSize = 1, hsStepSize = .5):
        '''Creates a contour around the ordered pairs of buoy observations. How
        tightly the contour fits around the data is determined by the step size
        parameters. Please note that this function is currently in beta; it
        needs further work to be optimized for use.
Parameters
----------
tStepSize : float
Determines how far to search for the next point in the T direction.
Smaller values will produce contours that follow the data more closely.
hsStepSize : float
Determines how far to search for the next point in the Hs direction.
Smaller values will produce contours that follow the data more closely.
Returns
-------
        dataBoundryHs : nparray
            The Hs values of the boundary observations
        dataBoundryT : nparray
            The T values of the boundary observations
Example
-------
To get the corresponding data contour:
import WDRT.ESSC as ESSC
# Pull spectral data from NDBC website
buoy46022 = ESSC.Buoy('46022','NDBC')
buoy46022.fetchFromWeb()
# Create PCA EA object for buoy
rosen46022 = ESSC.Rosenblatt(buoy46022)
# Calculate the data contour
dataHs, dataT = rosen46022.dataContour(tStepSize = 1, hsStepSize = .5)
'''
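        # The boundary is traced in four sweeps: bin 4 walks left to right
        # along the upper edge (max Hs per T step), bin 1 along the lower edge
        # (min Hs per T step), and bins 3 and 2 climb and descend the right
        # edge (max T per Hs step); the pieces are then reversed as needed and
        # stitched into a closed boundary.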
maxHs = max(self.buoy.Hs)
minHs = min(self.buoy.Hs)
sortedHsBuoy = copy.deepcopy(self.buoy)
sortedTBuoy = copy.deepcopy(self.buoy)
sortedTIndex = sorted(range(len(self.buoy.T)),key=lambda x:self.buoy.T[x])
sortedHsIndex = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
sortedHsBuoy.Hs = self.buoy.Hs[sortedHsIndex]
sortedHsBuoy.T = self.buoy.T[sortedHsIndex]
sortedTBuoy.Hs = self.buoy.Hs[sortedTIndex]
sortedTBuoy.T = self.buoy.T[sortedTIndex]
hsBin1 = []
hsBin2 = []
hsBin3 = []
hsBin4 = []
tBin1 = []
tBin2 = []
tBin3 = []
tBin4 = []
startingPoint = sortedTBuoy.T[0]
hsBin4.append(sortedTBuoy.Hs[0])
tBin4.append(sortedTBuoy.T[0])
while True:
tempNextBinTs = sortedTBuoy.T[sortedTBuoy.T < startingPoint + tStepSize]
tempNextBinHs = sortedTBuoy.Hs[sortedTBuoy.T < startingPoint + tStepSize]
nextBinTs = tempNextBinTs[tempNextBinTs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinTs > startingPoint]
try:
nextHs = max(nextBinHs)
nextT = nextBinTs[nextBinHs.argmax(axis=0)]
hsBin4.append(nextHs)
tBin4.append(nextT)
startingPoint = nextT
except ValueError:
startingPoint += tStepSize
break
if nextHs == maxHs:
break
startingPoint = sortedTBuoy.T[0]
hsBin1.append(sortedTBuoy.Hs[0])
tBin1.append(sortedTBuoy.T[0])
while True:
tempNextBinTs = sortedTBuoy.T[sortedTBuoy.T < startingPoint + tStepSize]
tempNextBinHs = sortedTBuoy.Hs[sortedTBuoy.T < startingPoint + tStepSize]
nextBinTs = tempNextBinTs[tempNextBinTs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinTs > startingPoint]
try:
nextHs = min(nextBinHs)
nextT = nextBinTs[nextBinHs.argmin(axis=0)]
hsBin1.append(nextHs)
tBin1.append(nextT)
startingPoint = nextT
except ValueError:
startingPoint += tStepSize
break
if nextHs == minHs:
break
startingPoint = sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)]
hsBin3.append(sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)])
tBin3.append(sortedHsBuoy.T[sortedHsBuoy.T.argmax(axis=0)])
while True:
tempNextBinTs = sortedHsBuoy.T[sortedHsBuoy.Hs < startingPoint + hsStepSize]
tempNextBinHs = sortedHsBuoy.Hs[sortedHsBuoy.Hs < startingPoint + hsStepSize]
nextBinTs = tempNextBinTs[tempNextBinHs > startingPoint]
nextBinHs = tempNextBinHs[tempNextBinHs > startingPoint]
try:
nextT = max(nextBinTs)
nextHs = nextBinHs[nextBinTs.argmax(axis=0)]
if nextHs not in hsBin4 and nextHs not in hsBin1:
hsBin3.append(nextHs)
tBin3.append(nextT)
startingPoint = nextHs
except ValueError:
startingPoint += hsStepSize
break
if nextHs == maxHs:
break
startingPoint = sortedHsBuoy.Hs[sortedHsBuoy.T.argmax(axis=0)]
while True:
tempNextBinTs = sortedHsBuoy.T[sortedHsBuoy.Hs > startingPoint - hsStepSize]
tempNextBinHs = sortedHsBuoy.Hs[sortedHsBuoy.Hs > startingPoint - hsStepSize]
nextBinTs = tempNextBinTs[tempNextBinHs < startingPoint]
nextBinHs = tempNextBinHs[tempNextBinHs < startingPoint]
try:
nextT = max(nextBinTs)
nextHs = nextBinHs[nextBinTs.argmax(axis=0)]
if nextHs not in hsBin1 and nextHs not in hsBin4:
hsBin2.append(nextHs)
tBin2.append(nextT)
startingPoint = nextHs
except ValueError:
startingPoint = startingPoint - hsStepSize
break
if nextHs == minHs:
break
hsBin2 = hsBin2[::-1] # Reverses the order of the array
tBin2 = tBin2[::-1]
hsBin4 = hsBin4[::-1] # Reverses the order of the array
tBin4 = tBin4[::-1]
dataBoundryHs = np.concatenate((hsBin1,hsBin2,hsBin3,hsBin4),axis = 0)
dataBoundryT = np.concatenate((tBin1,tBin2,tBin3,tBin4),axis = 0)
dataBoundryHs = dataBoundryHs[::-1]
dataBoundryT = dataBoundryT[::-1]
return(dataBoundryHs, dataBoundryT)
def __getCopulaParams(self,n_size,bin_1_limit,bin_step):
sorted_idx = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
Hs = self.buoy.Hs[sorted_idx]
T = self.buoy.T[sorted_idx]
# Estimate parameters for Weibull distribution for component 1 (Hs) using MLE
# Estimate parameters for Lognormal distribution for component 2 (T) using MLE
para_dist_1=stats.exponweib.fit(Hs,floc=0,fa=1)
para_dist_2=stats.norm.fit(np.log(T))
# Binning
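        # `ind` accumulates, for each bin limit, the cumulative count of Hs
        # observations at or below that limit; bin populations are the
        # differences between consecutive entries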
ind = np.array([])
ind = np.append(ind,sum(Hs_val <= bin_1_limit for Hs_val in Hs))
# Make sure first bin isn't empty or too small to avoid errors
        while ind[0] == 0 or ind[0] < n_size:
ind = np.array([])
bin_1_limit = bin_1_limit + bin_step
ind = np.append(ind,sum(Hs_val <= bin_1_limit for Hs_val in Hs))
for i in range(1,200):
bin_i_limit = bin_1_limit+bin_step*(i)
ind = np.append(ind,sum(Hs_val <= bin_i_limit for Hs_val in Hs))
            if (ind[i] - ind[i-1]) < n_size:
break
# Parameters for conditional distribution of T|Hs for each bin
        num = len(ind)  # num+1: number of bins
para_dist_cond = []
hss = []
para_dist_cond.append(stats.norm.fit(np.log(T[range(0,int(ind[0]))]))) # parameters for first bin
hss.append(np.mean(Hs[range(0,int(ind[0])-1)])) # mean of Hs (component 1 for first bin)
para_dist_cond.append(stats.norm.fit(np.log(T[range(0,int(ind[1]))]))) # parameters for second bin
hss.append(np.mean(Hs[range(0,int(ind[1])-1)])) # mean of Hs (component 1 for second bin)
        for i in range(2,num):
            para_dist_cond.append(stats.norm.fit(np.log(T[range(int(ind[i-2]), int(ind[i]))])))
            hss.append(np.mean(Hs[range(int(ind[i-2]), int(ind[i]))]))
        para_dist_cond.append(stats.norm.fit(np.log(T[range(int(ind[num-2]), int(len(Hs)))])))  # parameters for last bin
        hss.append(np.mean(Hs[range(int(ind[num-2]), int(len(Hs)))]))  # mean of Hs (component 1 for last bin)
para_dist_cond = np.array(para_dist_cond)
hss = np.array(hss)
phi_mean = np.column_stack((np.ones(num+1),hss[:],hss[:]**2,hss[:]**3))
phi_std = np.column_stack((np.ones(num+1),hss[:],hss[:]**2))
# Estimate coefficients of mean of Ln(T|Hs)(vector 4x1) (cubic in Hs)
        mean_cond = np.linalg.lstsq(phi_mean, para_dist_cond[:, 0], rcond=None)[0]
# Estimate coefficients of standard deviation of Ln(T|Hs) (vector 3x1) (quadratic in Hs)
        std_cond = np.linalg.lstsq(phi_std, para_dist_cond[:, 1], rcond=None)[0]
return para_dist_1, para_dist_2, mean_cond, std_cond
def __getNonParaCopulaParams(self,Ndata, max_T, max_Hs):
sorted_idx = sorted(range(len(self.buoy.Hs)),key=lambda x:self.buoy.Hs[x])
Hs = self.buoy.Hs[sorted_idx]
T = self.buoy.T[sorted_idx]
        # Calculate KDE bounds (this may be added as an input later)
min_limit_1 = 0
max_limit_1 = max_Hs
min_limit_2 = 0
max_limit_2 = max_T
# Discretize for KDE
        pts_hs = np.linspace(min_limit_1, max_limit_1, Ndata)
        pts_t = np.linspace(min_limit_2, max_limit_2, Ndata)
# Calculate optimal bandwidth for T and Hs
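        # Silverman's rule-of-thumb bandwidth, using the median absolute
        # deviation (MAD) as a robust scale estimate:
        # bw = sigma_robust * (4 / (3*n))**(1/5)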
sig = robust.scale.mad(T)
num = float(len(T))
bwT = sig*(4.0/(3.0*num))**(1.0/5.0)
sig = robust.scale.mad(Hs)
num = float(len(Hs))
bwHs = sig*(4.0/(3.0*num))**(1.0/5.0)
# Nonparametric PDF for T
temp = sm.nonparametric.KDEUnivariate(T)
temp.fit(bw = bwT)
f_t = temp.evaluate(pts_t)
# Nonparametric CDF for Hs
temp = sm.nonparametric.KDEUnivariate(Hs)
temp.fit(bw = bwHs)
tempPDF = temp.evaluate(pts_hs)
F_hs = tempPDF/sum(tempPDF)
F_hs = np.cumsum(F_hs)
# Nonparametric CDF for T
F_t = f_t/sum(f_t)
F_t = np.cumsum(F_t)
nonpara_dist_1 = np.transpose(np.array([pts_hs, F_hs]))
nonpara_dist_2 = np.transpose(np.array([pts_t, F_t]))
nonpara_pdf_2 = np.transpose(np.array([pts_t, f_t]))
return nonpara_dist_1, nonpara_dist_2, nonpara_pdf_2
def __gumbelCopula(self, u, alpha):
''' Calculates the Gumbel copula density
Parameters
----------
u: np.array
Vector of equally spaced points between 0 and twice the
maximum value of T.
alpha: float
Copula parameter. Must be greater than or equal to 1.
Returns
-------
y: np.array
Copula density function.
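        Notes
        -----
        For reference, the bivariate Gumbel copula CDF is
        C(u1, u2) = exp(-((-ln u1)**alpha + (-ln u2)**alpha)**(1/alpha));
        this method evaluates the corresponding density.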
'''
#Ignore divide by 0 warnings and resulting NaN warnings
        np.seterr(all='ignore')
import tensorflow as tf
import numpy as np
import os
import json
# SAR band names to read related GeoTIFF files
band_names_s1 = ["VV", "VH"]
# Spectral band names to read related GeoTIFF files
band_names_s2 = ['B01', 'B02', 'B03', 'B04', 'B05',
'B06', 'B07', 'B08', 'B8A', 'B09', 'B11', 'B12']
def prep_example(bands, BigEarthNet_19_labels, BigEarthNet_19_labels_multi_hot, patch_name_s1, patch_name_s2):
return tf.train.Example(
features=tf.train.Features(
feature={
'B01': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B01']))),
'B02': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B02']))),
'B03': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B03']))),
'B04': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B04']))),
'B05': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B05']))),
'B06': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B06']))),
'B07': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B07']))),
'B08': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B08']))),
'B8A': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B8A']))),
'B09': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B09']))),
'B11': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B11']))),
'B12': tf.train.Feature(
int64_list=tf.train.Int64List(value=np.ravel(bands['B12']))),
"VV": tf.train.Feature(
float_list=tf.train.FloatList(value=np.ravel(bands['VV']))),
"VH": tf.train.Feature(
float_list=tf.train.FloatList(value=np.ravel(bands['VH']))),
'BigEarthNet-19_labels': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[i.encode('utf-8') for i in BigEarthNet_19_labels])),
'BigEarthNet-19_labels_multi_hot': tf.train.Feature(
int64_list=tf.train.Int64List(value=BigEarthNet_19_labels_multi_hot)),
'patch_name_s1': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[patch_name_s1.encode('utf-8')])),
'patch_name_s2': tf.train.Feature(
bytes_list=tf.train.BytesList(value=[patch_name_s2.encode('utf-8')]))
}))
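# A minimal sketch (not part of the original tool) of reading one of these
# records back with the TF1 API used above; the file name and the 120x120
# patch size are illustrative assumptions (120x120 matches the 10m
# Sentinel-2 bands of BigEarthNet):
#
#   feature_description = {
#       'B04': tf.FixedLenFeature([120 * 120], tf.int64),
#       'patch_name_s2': tf.FixedLenFeature([], tf.string),
#   }
#   dataset = tf.data.TFRecordDataset('split.tfrecord').map(
#       lambda rec: tf.parse_single_example(rec, feature_description))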
def create_split(root_folder_s1, root_folder_s2, patch_names, TFRecord_writer, label_indices, GDAL_EXISTED, RASTERIO_EXISTED, UPDATE_JSON):
label_conversion = label_indices['label_conversion']
BigEarthNet_19_label_idx = {v: k for k, v in label_indices['BigEarthNet-19_labels'].items()}
if GDAL_EXISTED:
import gdal
elif RASTERIO_EXISTED:
import rasterio
progress_bar = tf.contrib.keras.utils.Progbar(target = len(patch_names))
for patch_idx, patch_name in enumerate(patch_names):
patch_name_s1, patch_name_s2 = patch_name[1], patch_name[0]
patch_folder_path_s1 = os.path.join(root_folder_s1, patch_name_s1)
patch_folder_path_s2 = os.path.join(root_folder_s2, patch_name_s2)
bands = {}
for band_name in band_names_s1:
band_path = os.path.join(
patch_folder_path_s1, patch_name_s1 + '_' + band_name + '.tif')
if GDAL_EXISTED:
band_ds = gdal.Open(band_path, gdal.GA_ReadOnly)
raster_band = band_ds.GetRasterBand(1)
band_data = raster_band.ReadAsArray()
bands[band_name] = np.array(band_data)
elif RASTERIO_EXISTED:
band_ds = rasterio.open(band_path)
band_data = np.array(band_ds.read(1))
bands[band_name] = np.array(band_data)
for band_name in band_names_s2:
# First finds related GeoTIFF path and reads values as an array
band_path = os.path.join(
patch_folder_path_s2, patch_name_s2 + '_' + band_name + '.tif')
if GDAL_EXISTED:
band_ds = gdal.Open(band_path, gdal.GA_ReadOnly)
raster_band = band_ds.GetRasterBand(1)
band_data = raster_band.ReadAsArray()
                bands[band_name] = np.array(band_data)
"""
First created on Mon Aug 13 10:01:03 2018
Main code for the creation of the image for Zernike analysis;
Other modules available are:
Zernike_Cutting_Module
Zernike_Analysis_Module
Versions:
Oct 31, 2018; 0.1 -> 0.11 fixed FRD effect
Nov 1, 2018; 0.11 -> 0.12 added correct edges to the detector; fixed wrong behavior for misalignment
Nov 2, 2018; 0.12 -> 0.13 added lorentzian wings to the illumination of the pupil
Nov 3, 2018; 0.13 -> 0.13b fixed edges of detector when det_vert is not 1
Nov 12, 2018; 0.13b -> 0.13c changed parameter describing hexagonal effect "f" from 0.1 to 0.2
Nov 12, 2018; 0.13c -> 0.14 changed illumination description modifying entrance -> exit pupil illumination
Nov 29, 2018; 0.14 -> 0.14b added fixed scattering slope, deduced from large image in focus
Dec 16, 2018; 0.14b -> 0.14c allparameters_proposal_err from list to array
Dec 18, 2018; 0.14c -> 0.14d strutFrac upper limit to 0.13 in create_parInit
Dec 23, 2018; 0.14d -> 0.15 refactoring so that x_ilum and y_ilum is one
Dec 26, 2018; 0.15 -> 0.15b when in focus, create exactly 10x oversampling
Dec 31, 2018; 0.15b -> 0.16 major rewrite of downsampling algorithm
Jan 8, 2019; 0.16 -> 0.17 added support for zmax=22
Jan 14, 2019; 0.17 -> 0.18 fixed bug with dowsampling algorithm - I was just taking central values
Jan 15, 2019; 0.18 -> 0.19 added simple algorithm to interpolate between 1/10 pixels in the best position
Feb 15, 2019; 0.19 -> 0.20 updated analysis for the new data
Feb 21, 2019; 0.20 -> 0.20b test parameter for showing globalparameters outside their limits
Feb 22, 2019; 0.20 -> 0.21 added support for Zernike higher than 22
Feb 22, 2019; 0.21 -> 0.21b added support for return image along side likelihood
Apr 17, 2019; 0.21b -> 0.21c changed definition of residuals from (model-data) to (data-model)
Jun 4, 2019; 0.21c -> 0.21d slight cleaning of the code, no functional changes
Jun 26, 2019; 0.21d -> 0.21e included variable ``dataset'',
                             which denotes which data we are using in the analysis
Jul 29, 2019; 0.21e -> 0.21f changed the spread of parameters when drawing initial solutions, based on data
Sep 11, 2019; 0.21f -> 0.21g globalparameters_flat_6<1 to globalparameters_flat_6<=1
Oct 10, 2019: 0.21g -> 0.21h scattered_light_kernel saving option
Oct 31, 2019: 0.21h -> 0.22 (re)introduced small amount of apodization (PIPE2D-463)
Oct 31, 2019: 0.22 -> 0.22b introduced verbosity
Nov 07, 2019: 0.22b -> 0.22c nan values can pass through find_single_realization_min_cut
Nov 08, 2019: 0.22c -> 0.22d changes to resizing and centering
Nov 13, 2019: 0.22d -> 0.23 major changes to centering - chief ray in the center of oversampled image
Nov 15, 2019: 0.23 -> 0.24 change likelihood definition
Dec 16, 2019: 0.24 -> 0.24a added illumination with z4,z11,z22=0
Jan 14, 2020: 0.24a -> 0.24b added verbosity in find_single_realization_min_cut function
Jan 31, 2020: 0.24b -> 0.25 added support for data contaning spots from two wavelengths
Feb 11, 2020: 0.25 -> 0.26 proper bilinear interpolation of the spots
Feb 17, 2020: 0.26 -> 0.26a increased speed when save parameter=0
Feb 18, 2020: 0.26a -> 0.26b mask image going through subpixel interpolation
Feb 19, 2020: 0.26b -> 0.26c normalization of sci image takes into account mask
Mar 1, 2020: 0.26c -> 0.27 apodization scales with the size of input images
Mar 4, 2020: 0.27 -> 0.28 (re-)introduced custom size of pupil image
Mar 6, 2020: 0.28 -> 0.28b refactored cut_square function (making it much faster)
Mar 8, 2020: 0.28b -> 0.28c set limit in grating factor to 120000 in generating code
Apr 1, 2020: 0.28c -> 0.28d svd_invert function
May 6, 2020: 0.28d -> 0.28e clarified and expanded comments in postprocessing part
Jun 28, 2020: 0.28e -> 0.29 added multi analysis
Jul 02, 2020: 0.29 -> 0.30 added internal fitting for flux
Jul 02, 2020: 0.30 -> 0.30a lnlike_Neven_multi_same_spot can accept both 1d and 2d input
Jul 07, 2020: 0.30a -> 0.30b added threading time information
Jul 09, 2020: 0.30b -> 0.30c expwf_grid changed to complex64 from complex128
Jul 09, 2020: 0.30c -> 0.30d changed all float64 to float32
Jul 16, 2020: 0.30d -> 0.31 moved all fft to scipy.signal.fftconvolve
Jul 20, 2020: 0.31 -> 0.32 introduced renormalization_of_var_sum for multi_var analysis
Jul 26, 2020: 0.32 -> 0.32a only changed last value of allparameters if len()==42
Aug 10, 2020: 0.32a -> 0.33 added extra Zernike to parInit
Aug 12, 2020: 0.33 -> 0.33a changed iters to 6 in fluxfit
Sep 08, 2020: 0.33a -> 0.33b added test_run to help with debugging
Oct 05, 2020: 0.33b -> 0.33c trying to always output flux multiplier when fit_for_flux
Oct 06, 2020: 0.33c -> 0.34 added possibility to specify position of created psf
Oct 13, 2020: 0.34 -> 0.34b added finishing step of centering, done with Nelder-Mead
Oct 22, 2020: 0.34b -> 0.35 added class that does Tokovinin multi analysis
Nov 03, 2020: 0.35 -> 0.35a create parInit up to z=22, with larger parametrization
Nov 05, 2020: 0.35a -> 0.35b return same value if Tokovinin does not work
Nov 16, 2020: 0.35b -> 0.35c modified movement of parameters
Nov 17, 2020: 0.35c -> 0.35d small fixes in check_global_parameters with parameters 0 and 1
Nov 19, 2020: 0.35d -> 0.36 realized that vertical strut is different than others -
first, simplest implementation
Nov 19, 2020: 0.36 -> 0.36a modified parInit movements for multi (mostly reduced)
Dec 05, 2020: 0.36a -> 0.37 misalignment and variable strut size
Dec 13, 2020: 0.37 -> 0.37a changed weights in multi_same_spot
Jan 17, 2021: 0.37a -> 0.37b accept True as input for simulation00
Jan 25, 2021: 0.37b -> 0.37c fixed fillCrop function in PsfPosition, slice limits need to be integers
Jan 26, 2021: 0.37c -> 0.38 PIPE2D-701, fixed width of struts implementation
Jan 28, 2021: 0.38 -> 0.39 added flux mask in chi**2 calculation
Jan 28, 2021: 0.39 -> 0.39b lowered allowed values for pixel_effect and fiber_r
Feb 08, 2021: 0.39b -> 0.4 fixed bilinear interpolation for secondary, x and y confusion
Feb 25, 2021: 0.4 -> 0.40a added directory for work on Tiger
Mar 05, 2021: 0.40a -> 0.41 introduced create_custom_var function
Mar 08, 2021: 0.41 -> 0.41a added support for saving intermediate images to tiger
Mar 24, 2021: 0.41a -> 0.41b added support for masked images in find_centroid_of_flux
Mar 26, 2021: 0.41b -> 0.41c added create_custom_var function as a separate function
Mar 26, 2021: 0.41c -> 0.41d semi-implemented custom variance function in Tokovinin algorithm
Mar 26, 2021: 0.41d -> 0.41e model_multi_out has correct input parameters now
Apr 01, 2021: 0.41e -> 0.42 changed bug/feature in checking wide_43 and wide_42 parameters
Apr 02, 2021: 0.42 -> 0.43 changed width of slit shadow and slit holder shadow
Apr 04, 2021: 0.43 -> 0.44 implemented f_multiplier_factor
Apr 04, 2021: 0.44 -> 0.44a implemented possibility for using np.abs(chi) as likelihood
Apr 08, 2021: 0.44a -> 0.44b propagated change from 0.44a to Tokovinin algorithm
Apr 12, 2021: 0.44b -> 0.44c modified renormalization factors for abs(chi) value
Apr 13, 2021: 0.44c -> 0.44d fixed bug in the estimate of mean_value_of_background
Apr 14, 2021: 0.44d -> 0.44e mean_value_of_background estimated from sci or var data
Apr 22, 2021: 0.44e -> 0.44f introduced multi_background_factor
Apr 27, 2021: 0.44f -> 0.45 Tokovinin now works much quicker with multi_background_factor
(create_simplified_H updated)
Apr 29, 2021: 0.45 -> 0.45a many changes in order to run create_simplified_H efficently
May 07, 2021: 0.45a -> 0.45b if Premodel analysis failed, return 15 values
May 08, 2021: 0.45b -> 0.45c changed that images of same size do not crash out_images creation
May 14, 2021: 0.45c -> 0.45d create_parInit, changed from <> to <= and >=
May 18, 2021: 0.45d -> 0.45e testing focus constrain in Tokovinin
May 19, 2021: 0.45e -> 0.45f expanded verbosity messages in Tokovinin algorithm
May 19, 2021: 0.45f -> 0.45g testing [8., 8., 8., 8., 1., 8., 8., 8., 8.] renormalization
May 20, 2021: 0.45g -> 0.45h do not use multi_background for image in or near focus
May 27, 2021: 0.45h -> 0.45i reordered variables in LN_PFS_single, in preparation for wv analysis
May 27, 2021: 0.45i -> 0.46 changed oversampling to be always 10
Jun 08, 2021: 0.46 -> 0.46a changed to Psf_position to be able to take only_chi and center of flux
Jun 08, 2021: 0.46a -> 0.46b changed normalization so that in focus it is identical to the pipeline
Jun 15, 2021: 0.46b -> 0.46c change limit on the initial cut of the oversampled image,
in order to handle bluer data
Jun 19, 2021: 0.46c -> 0.46d changed skimage.transform.resize to resize,
                             to avoid skimage.transform not being available in LSST
Jun 20, 2021: 0.46d -> 0.46e changed scipy.signal to signal,
and require that optPsf_cut_downsampled_scattered size is int /
no change to unit test
Jun 24, 2021: 0.46e -> 0.47 removed resize and introduced galsim resizing in Psf_position,
to be consistent with LSST pipeline
Jun 25, 2021: 0.47 -> 0.47a introduced galsim resizing in the first downsampling from natural resolution
to default=10 oversampling also
Jul 11, 2021: 0.47a -> 0.47b changed a minus factor in secondary position estimation
Jul 12, 2021: 0.47b -> 0.47c initial offset in positioning had a wrong +- sign in front
Jul 23, 2021: 0.47c -> 0.47d (only) added comments and explanations
Jul 26, 2021: 0.47d -> 0.47e changed default oversampling to 11
Jul 27, 2021: 0.47e -> 0.47f offset done in galsim, but downsampling via resize function
Aug 26, 2021: 0.47f -> 0.47g direct minimization when use_center_of_flux=True
Aug 30, 2021: 0.47g -> 0.48 offset done in LSST code now
Sep 02, 2021: 0.48 -> 0.48a done cleaning offset code (PIPE2D-880)
Sep 15, 2021: 0.48a -> 0.48b removed minor bug where array_of_var_sum was called too early,
and could fail if nan value was present
Sep 27, 2021: 0.48b -> 0.48c added explicit bool conversion to double_sources
Oct 05, 2021: 0.48d -> 0.48e further explicit bool(double_sources) conversion in ln_pfs_single
Oct 08, 2021: 0.48d -> 0.48e Pep8 cleaning
Oct 15, 2021: 0.48e -> 0.48f forced a randomseed number in create_parInit function
Oct 25, 2021: 0.48f -> 0.49 set half of init values in create_parInit to be same as init value
Oct 26, 2021: 0.49 -> 0.49a modified create_custom_var that it does lin fit if 2nd degree fit is convex
Oct 28, 2021: 0.49a -> 0.49b modified create_custom_var so that it does not fall below min(var) value
Nov 01, 2021: 0.49b -> 0.49c create_custom_var does not change var image from step to step anymore
Nov 02, 2021: 0.49c -> 0.49d eliminated std varianble from create_simplified_H
Nov 03, 2021: 0.49d -> 0.49e PIPE2D-930; fixed reusing list_of_variance in Tokovinin
Nov 03, 2021: 0.49e -> 0.50 PIPE2D-931; modified creation of polyfit for variance image higher up
so it is done only once per sci/var/mask image combination
Nov 20, 2021: 0.50 -> 0.50a Hilo modifications
Dec 06, 2021: 0.50a -> 0.51 Zernike_estimation_preparation class
Dec 09, 2021: 0.51 -> 0.51a introduced `fixed_single_spot`
Feb 11, 2022: 0.51a -> 0.51b unified index parameter allowed to vary
Mar 18, 2022: 0.51b -> 0.51c introduced div_same par, controlling how many particles are same
Mar 24, 2022: 0.51c -> 0.51d multiple small changes, for running same illum in fiber
Apr 03, 2022: 0.51d -> 0.51e test is now analysis_type_fiber == "fixed_fiber_par"
May 05, 2022: 0.51e -> 0.51f added documentation
May 09, 2022: 0.51f -> 0.51g replaced print with logging
May 24, 2022: 0.51g -> 0.51h small changes to output testing directory
May 26, 2022: 0.51h -> 0.51i linting fixes
Jun 01, 2022: 0.51i -> 0.52 im1.setCenter(0,0), to be compatible with galsim 2.3.4
@author: <NAME>
@contact: <EMAIL>
@web: www.ncaplar.com
"""
########################################
# standard library imports
# from __future__ import absolute_import, division, print_function
from functools import partial
from typing import Tuple, Iterable
# import matplotlib
# from matplotlib.colors import LogNorm
# import matplotlib.pyplot as plt
import lmfit
from scipy.linalg import svd
from scipy import signal
from scipy.ndimage.filters import gaussian_filter
import scipy.fftpack
import scipy.misc
from scipy.special import erf
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import Tophat2DKernel
import lsst.afw.math
import lsst.afw.image
import lsst.afw
import lsst
import galsim
import traceback
# import platform
import threading
# from multiprocessing import current_process
import numpy as np
import os
import time
# import sys
import math
import socket
import sys
import pickle
import logging
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
np.set_printoptions(suppress=True)
np.seterr(divide='ignore', invalid='ignore')
# logging.info(np.__config__)
########################################
# Related third party imports
# none at the moment
########################################
# Local application/library specific imports
# galsim
galsim.GSParams.maximum_fft_size = 12000
# lsst
# astropy
# import astropy
# import astropy.convolution
# scipy
# import scipy
# import skimage.transform
# import scipy.optimize as optimize
# for svd_invert function
# lmfit
# matplotlib
# needed for resizing routines
# for distributing image creation in Tokovinin algorithm
########################################
__all__ = [
'PupilFactory',
'Pupil',
'ZernikeFitterPFS',
'LN_PFS_multi_same_spot',
'LN_PFS_single',
'LNP_PFS',
'find_centroid_of_flux',
'create_parInit',
'PFSPupilFactory',
'custom_fftconvolve',
'stepK',
'maxK',
'sky_scale',
'sky_size',
'remove_pupil_parameters_from_all_parameters',
'resize',
'_interval_overlap',
'svd_invert',
'Tokovinin_multi',
'find_centroid_of_flux',
'create_popt_for_custom_var',
'create_custom_var_from_popt',
'Zernike_estimation_preparation']
__version__ = "0.52"
# classes Pupil, PupilFactory and PFSPupilFactory have different form of documentation,
# compared to other classes as they have been imported from code written by <NAME>
class Pupil(object):
"""!Pupil obscuration function.
"""
def __init__(self, illuminated, size, scale):
"""!Construct a Pupil
@param[in] illuminated 2D numpy array indicating which parts of
the pupil plane are illuminated.
@param[in] size Size of pupil plane array in meters. Note
that this may be larger than the actual
diameter of the illuminated pupil to
accommodate zero-padding.
@param[in] scale Sampling interval of pupil plane array in
meters.
"""
self.illuminated = illuminated
self.size = size
self.scale = scale
class PupilFactory(object):
"""!Pupil obscuration function factory for use with Fourier optics.
Based on the code by <NAME>, developed for HSC camera
Contains functions that can create various obscurations in the camera
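    Example (a sketch only; the numeric values are illustrative, not
    recommended defaults):

        pf = PupilFactory(
            pupilSize=0.1, npix=1024, input_angle=np.pi / 2,
            detFrac=0.65, strutFrac=0.05, slitFrac=0.05, slitFrac_dy=0.0,
            x_fiber=0.0, y_fiber=0.0, effective_ilum_radius=0.9,
            frd_sigma=0.05, frd_lorentz_factor=0.5, det_vert=1.0)
        pupil = pf._fullPupil()  # fully-illuminated starting Pupil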
"""
def __init__(
self,
pupilSize,
npix,
input_angle,
detFrac,
strutFrac,
slitFrac,
slitFrac_dy,
x_fiber,
y_fiber,
effective_ilum_radius,
frd_sigma,
frd_lorentz_factor,
det_vert,
wide_0=0,
wide_23=0,
wide_43=0,
misalign=0,
verbosity=0):
"""Construct a PupilFactory.
Parameters
----------
pupilSize: `float`
Size of the exit pupil [m]
npix: `int`
Constructed Pupils will be npix x npix
input_angle: `float`
            Angle of the pupil (for all practical purposes fixed at np.pi/2)
detFrac: `float`
            Value determining how much of the exit pupil is obscured by the
            central obscuration (detector)
strutFrac: `float`
Value determining how much of the exit pupil is obscured
by a single strut
slitFrac: `float`
Value determining how much of the exit pupil is obscured by slit
slitFrac_dy: `float`
Value determining what is the vertical position of the slit
in the exit pupil
        x_fiber: `float`
            Position of the fiber misalignment in the x direction
        y_fiber: `float`
            Position of the fiber misalignment in the y direction
effective_ilum_radius: `float`
Fraction of the maximal radius of the illumination
of the exit pupil that is actually illuminated
frd_sigma: `float`
Sigma of Gaussian convolving only outer edge, mimicking FRD
frd_lorentz_factor: `float`
Strength of the lorentzian factor describing wings
det_vert: `float`
Multiplicative factor determining vertical size
of the detector obscuration
wide_0: `float`
Widening of the strut at 0 degrees
wide_23: `float`
Widening of the strut at the top-left corner
wide_43: `float`
Widening of the strut at the bottom-left corner
misalign: `float`
            Describing the amount of misalignment
verbosity: `int`
How verbose during evaluation (1 = full verbosity)
"""
self.verbosity = verbosity
if self.verbosity == 1:
logging.info('Entering PupilFactory class')
self.pupilSize = pupilSize
self.npix = npix
self.input_angle = input_angle
self.detFrac = detFrac
self.strutFrac = strutFrac
self.pupilScale = pupilSize / npix
self.slitFrac = slitFrac
self.slitFrac_dy = slitFrac_dy
self.effective_ilum_radius = effective_ilum_radius
self.frd_sigma = frd_sigma
self.frd_lorentz_factor = frd_lorentz_factor
self.det_vert = det_vert
self.wide_0 = wide_0
self.wide_23 = wide_23
self.wide_43 = wide_43
self.misalign = misalign
u = (np.arange(npix, dtype=np.float32) - (npix - 1) / 2) * self.pupilScale
self.u, self.v = np.meshgrid(u, u)
@staticmethod
def _pointLineDistance(p0, p1, p2):
"""Compute the right-angle distance between the points given by `p0`
and the line that passes through `p1` and `p2`.
@param[in] p0 2-tuple of numpy arrays (x,y coords)
@param[in] p1 2-tuple of scalars (x,y coords)
@param[in] p2 2-tuple of scalars (x,y coords)
@returns numpy array of distances; shape congruent to p0[0]
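        Worked example: the distance from p0 = (1, 0) to the line through
        p1 = (0, 0) and p2 = (0, 1) (the y axis) is
        |1*1 - 0*0 + 0*0 - 1*0| / hypot(1, 0) = 1.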
"""
x0, y0 = p0
x1, y1 = p1
x2, y2 = p2
dy21 = y2 - y1
dx21 = x2 - x1
return np.abs(dy21 * x0 - dx21 * y0 + x2 * y1 - y2 * x1) / np.hypot(dy21, dx21)
def _fullPupil(self):
"""Make a fully-illuminated Pupil.
@returns Pupil
"""
illuminated = np.ones(self.u.shape, dtype=np.float32)
return Pupil(illuminated, self.pupilSize, self.pupilScale)
def _cutCircleInterior(self, pupil, p0, r):
"""Cut out the interior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 < r**2] = False
def _cutCircleExterior(self, pupil, p0, r):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Circular region radius
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
pupil.illuminated[r2 > r**2] = False
def _cutEllipseExterior(self, pupil, p0, r, b, thetarot):
"""Cut out the exterior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
@param[in] r Ellipse region radius = major axis
@param[in] b Ellipse region radius = minor axis
@param[in] thetarot Ellipse region rotation
"""
r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2
theta = np.arctan(self.u / self.v) + thetarot
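        # polar form of an ellipse with semi-axes r and b: points whose squared
        # radius exceeds r**2*b**2 / (b**2*cos(theta)**2 + r**2*sin(theta)**2)
        # lie outside the (rotated) ellipse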
pupil.illuminated[r2 > r**2 * b**2 / (b**2 * (np.cos(theta))**2 + r**2 * (np.sin(theta))**2)] = False
def _cutSquare(self, pupil, p0, r, angle, det_vert):
"""Cut out the interior of a circular region from a Pupil.
@param[in,out] pupil Pupil to modify in place
@param[in] p0 2-tuple indicating region center
        @param[in] r half the length of the square side
@param[in] angle angle that the camera is rotated
@param[in] det_vert multiplicative factor that distorts the square into a rectangle
"""
pupil_illuminated_only1 = np.ones_like(pupil.illuminated, dtype=np.float32)
time_start_single_square = time.time()
###########################################################
# Central square
if det_vert is None:
det_vert = 1
x21 = -r / 2 * det_vert * 1
x22 = +r / 2 * det_vert * 1
y21 = -r / 2 * 1
y22 = +r / 2 * 1
i_max = self.npix / 2 - 0.5
i_min = -i_max
i_y_max = int(np.round((x22 + p0[1]) / self.pupilScale - (i_min)))
i_y_min = int(np.round((x21 + p0[1]) / self.pupilScale - (i_min)))
i_x_max = int(np.round((y22 + p0[0]) / self.pupilScale - (i_min)))
i_x_min = int(np.round((y21 + p0[0]) / self.pupilScale - (i_min)))
assert angle == np.pi / 2
# angleRad = angle
camX_value_for_f_multiplier = p0[0]
camY_value_for_f_multiplier = p0[1]
# logging.info(camX_value_for_f_multiplier,camY_value_for_f_multiplier)
camY_Max = 0.02
f_multiplier_factor = (-camX_value_for_f_multiplier * 100 / 3) * \
(np.abs(camY_value_for_f_multiplier) / camY_Max) + 1
# f_multiplier_factor=1
if self.verbosity == 1:
logging.info('f_multiplier_factor for size of detector triangle is: ' + str(f_multiplier_factor))
pupil_illuminated_only0_in_only1 = np.zeros((i_y_max - i_y_min, i_x_max - i_x_min))
u0 = self.u[i_y_min:i_y_max, i_x_min:i_x_max]
v0 = self.v[i_y_min:i_y_max, i_x_min:i_x_max]
        # factor controlling the size of the triangle in the corner of the detector
f = 0.2
f_multiplier = f_multiplier_factor / 1
###########################################################
# Lower right corner
x21 = -r / 2
x22 = +r / 2
y21 = -r / 2 * det_vert
y22 = +r / 2 * det_vert
f_lr = np.copy(f) * (1 / f_multiplier)
angleRad21 = -np.pi / 4
triangle21 = [[p0[0] + x22, p0[1] + y21],
[p0[0] + x22, p0[1] + y21 - y21 * f_lr],
[p0[0] + x22 - x22 * f_lr, p0[1] + y21]]
p21 = triangle21[0]
y22 = (triangle21[1][1] - triangle21[0][1]) / np.sqrt(2)
y21 = 0
        x21 = (triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest gained self.subTest() only in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
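        # value=(0, 10) draws a fresh addend per augmentation call, so the
        # stochastic branch should change between iterations while the
        # deterministic branch must reproduce the same output every time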
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
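        # per_channel=0.5 means each image independently samples channelwise
        # values with probability 0.5; over 400 trials both outcomes should
        # land near 200 (hence the 150..250 acceptance window)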
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
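# Unlike Add, AddElementwise samples one value per pixel (and, with
# per_channel=True, per channel), e.g. roughly:
#     aug = iaa.AddElementwise((-10, 10), per_channel=True)
#     image_aug = aug.augment_image(image)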
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
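                    # np.arange yields numpy scalar values; casting through
                    # value_type ensures a plain Python int reaches the
                    # augmenter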
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
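        # the stochastic augmenter should produce changing outputs across
        # iterations, while its deterministic snapshot must repeat the same
        # output every time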
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
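        # compare each flattened pixel with its predecessor; elementwise
        # sampling should make nearly all neighbouring pixels differ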
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
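        # per_channel=0.5: about half of all draws should sample channelwise
        # (channel sums 0..3 all appear), the other half imagewise (sums are
        # only 0 or 3)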
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
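            # additions that would leave the dtype's value range must clip
            # at min_value/max_value instead of wrapping around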
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
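        # clipping to [0, 255] truncates the gaussian, so the observed std
        # only needs to lie in the rough vicinity of scale = 0.2*255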
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
        # no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
    def test_p_is_one(self):
        # 100% dropout, should drop everything
        base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Dropout(p=1.0)
        observed = aug.augment_images(images)
        expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
        assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
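        # a list for p means that one of the given dropout rates is sampled
        # per image, so 0.0, 0.5 and 1.0 should each be observed in roughly
        # a third of all iterations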
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
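        # the stochastic parameter is used directly to sample the per-pixel
        # keep mask here: Binomial(1 - 0.0) keeps every pixel,
        # Binomial(1 - 0.5) keeps about half of them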
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
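        # size_px=1 creates a 1x1 coarse mask that is upscaled to the full
        # image size, so each image is either dropped entirely or kept
        # entirely; hence every average is exactly 0 or 100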
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
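# Dropout2d sets entire channels to zero with probability p, but always
# keeps at least nb_keep_channels channels per image, e.g. roughly:
#     aug = iaa.Dropout2d(p=0.1, nb_keep_channels=1)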
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
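        # p is stored as a Binomial over the *keep* probability, so p=0
        # corresponds to Binomial(1.0)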
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
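        # binomial std for n=3000, p=0.75 is ~23.7, so atol=75 is roughly a
        # 3-sigma bound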
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel dropped at least once, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
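# TotalDropout drops entire images, i.e. all channels at once, e.g. roughly:
#     aug = iaa.TotalDropout(p=0.1)
#     images_aug = aug(images=images)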
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
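# Multiply scales all pixels of an image by a sampled factor (one factor per
# image, or per channel with per_channel=True), e.g. roughly:
#     aug = iaa.Multiply((0.5, 1.5), per_channel=True)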
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
        # no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
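        # with 100 channels and mul sampled per channel from {0, 2}, both
        # values are virtually guaranteed to appear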
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
                aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
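            # negative products must clip to 0 for unsigned dtypes, but are
            # representable for signed ones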
            if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
            if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
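# Rationale for the uniqueness checks above: without per_channel one
# sampled factor is applied to all channels (a single unique output
# value), while per_channel=True draws an independent factor per channel,
# so with 100 channels at least two distinct outputs are all but certain.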
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# bounds of -100 - 1e-2 and 100 + 1e-2 are not loose enough for float16; the tolerance had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
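# Illustrative sketch (commented out so importing this module stays
# side-effect free): Multiply samples one factor per image (optionally per
# channel), while MultiplyElementwise below samples an independent factor
# per pixel. A minimal usage example, assuming the iaa/np imports of this
# module:
#
#   image = np.full((2, 2, 1), 100, dtype=np.uint8)
#   iaa.Multiply(1.2).augment_image(image)  # every pixel becomes 120
#   iaa.MultiplyElementwise((0.5, 1.5)).augment_image(image)  # per-pixel factors in [0.5, 1.5]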
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
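# Note on the 150..250 bounds above: per_channel=0.5 makes the channelwise
# decision a Bernoulli(0.5) draw per image, so the channelwise count over
# 400 trials is Binomial(400, 0.5) with mean 200 and std sqrt(400*0.25)=10;
# the window is therefore a generous +/- 5 sigma band.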
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# bounds of -100 - 1e-2 and 100 + 1e-2 are not loose enough for float16; the tolerance had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
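# Sketch of the mask semantics verified above (phrased as an assumption
# about the ReplaceElementwise API used in this module): the mask is
# sampled per pixel, so iap.Binomial(p=0.5) replaces each pixel
# independently with probability ~0.5, e.g.:
#
#   aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
#   # roughly half of the pixels become 0, the rest keep their value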
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
def test_per_channel(self):
# test channelwise
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
assert observed.shape == (100, 100, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# no more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
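# A rough equivalent of SaltAndPepper in terms of ReplaceElementwise
# (an illustrative assumption; the parameter plumbing in the real
# implementation may differ):
#
#   iaa.ReplaceElementwise(
#       mask=p,
#       replacement=iap.Beta(0.5, 0.5) * 255,  # values pushed toward 0 or 255
#       per_channel=per_channel)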
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
density_expected = 1.0 / nb_bins
for nb_seen in hist:
density = nb_seen / len(ps)
assert density_expected - tolerance < density < density_expected + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center point here anyway
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
density_expected = 1.0 / nb_bins
for nb_seen in hist:
density = nb_seen / len(ps)
assert density_expected - tolerance < density < density_expected + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Pepper is just a tiny wrapper around
# ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
density_expected = 1.0 / nb_bins
for nb_seen in hist:
density = nb_seen / len(ps)
assert density_expected - tolerance < density < density_expected + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarsePepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarsePepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class Test_invert(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] is None
assert args[1]["max_value"] is None
assert args[1]["threshold"] is None
assert args[1]["invert_above_threshold"] is True
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr, min_value=1, max_value=10, threshold=5,
invert_above_threshold=False)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] == 1
assert args[1]["max_value"] == 10
assert args[1]["threshold"] == 5
assert args[1]["invert_above_threshold"] is False
def test_uint8(self):
values = np.array([0, 20, 45, 60, 128, 255], dtype=np.uint8)
expected = np.array([
255,
255-20,
255-45,
255-60,
255-128,
255-255
], dtype=np.uint8)
observed = iaa.invert(values)
assert np.array_equal(observed, expected)
assert observed is not values
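# The uint8 expectations above follow the basic full-range inversion
# formula v_inv = 255 - v; the threshold variants tested below apply the
# same formula only on one side of the threshold.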
# most parts of this function are tested via Invert
class Test_invert_(unittest.TestCase):
def test_arr_is_noncontiguous_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_flipped = np.fliplr(np.copy(zeros + 255))
observed = iaa.invert_(max_vr_flipped)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_arr_is_view_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_view = np.copy(zeros + 255)[:, :, [0, 2]]
observed = iaa.invert_(max_vr_view)
expected = zeros[:, :, [0, 2]]
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values))
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_0_inv_above(self):
threshold = 0
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_255_inv_above(self):
threshold = 255
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_256_inv_above(self):
threshold = 256
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above_with_min_max(self):
threshold = 50
# uint64 does not support custom min/max, hence it is omitted here
dtypes = ["uint8", "uint16", "uint32"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0, # not clipped to 10 as only >thresh affected
20,
45,
100 - 50,
100 - 90,
100 - 90
], dtype=dt)
observed = iaa.invert_(np.copy(values),
min_value=10,
max_value=100,
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
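# With custom min/max the expectations above correspond to
# v_inv = min_value + (max_value - clip(v, min_value, max_value)),
# e.g. v=60 -> 10 + (100 - 60) = 50 and any v >= 100 -> 10.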
def test_int_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
-45,
-20,
center_value,
20,
45,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
(-1) * (-45) - 1,
(-1) * (-20) - 1,
(-1) * center_value - 1,
(-1) * 20 - 1,
(-1) * 45 - 1,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
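# For signed integer dtypes min_value == -max_value - 1, so the full-range
# inversion min_value + max_value - v reduces to -v - 1, which is exactly
# what the expected values above encode.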
def test_float_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
-45.5,
-20.5,
center_value,
20.5,
45.5,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
def test_float_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
(-1) * (-45.5),
(-1) * (-20.5),
(-1) * center_value,
(-1) * 20.5,
(-1) * 45.5,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
class Test_solarize(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 5
assert observed == "foo"
def test_uint8(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
observed = iaa.solarize(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_compare_with_pil(self):
import PIL.Image
import PIL.ImageOps
def _solarize_pil(image, threshold):
img = PIL.Image.fromarray(image)
return np.asarray(PIL.ImageOps.solarize(img, threshold))
image = np.mod(np.arange(20*20*3), 255).astype(np.uint8)\
.reshape((20, 20, 3))
for threshold in np.arange(256):
image_pil = _solarize_pil(image, threshold)
image_iaa = iaa.solarize(image, threshold)
assert np.array_equal(image_pil, image_iaa)
class Test_solarize_(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 5
assert observed == "foo"
class TestInvert(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_one(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0).augment_image(zeros + 255)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_p_is_zero(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=0.0).augment_image(zeros + 255)
expected = zeros + 255
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200).augment_image(zeros + 200)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# Only difference from the original (hence the above Copyright): .variances -> .variance
import numpy as np
from GPy.kern.src.kern import Kern
from GPy.util.linalg import tdot
from GPy.core.parameterization import Param
from paramz.transformations import Logexp
from paramz.caching import Cache_this
from GPy.kern.src.psi_comp import PSICOMP_Linear
class Linear(Kern):
"""
Linear kernel
.. math::
k(x,y) = \sum_{i=1}^{\\text{input_dim}} \sigma^2_i x_iy_i
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the vector of variance :math:`\sigma^2_i`
:type variance: array or list of the appropriate size (or float if there
is only one variance parameter)
:param ARD: Auto Relevance Determination. If False, the kernel has only one
variance parameter \sigma^2, otherwise there is one variance
parameter per dimension.
:type ARD: Boolean
:rtype: kernel object
"""
def __init__(self, input_dim, variance=None, ARD=False, active_dims=None, name='linear'):
super(Linear, self).__init__(input_dim, active_dims, name)
self.ARD = ARD
if not ARD:
if variance is not None:
variance = np.asarray(variance)
assert variance.size == 1, "Only one variance needed for non-ARD kernel"
else:
variance = np.ones(1)
else:
if variance is not None:
variance = np.asarray(variance)
assert variance.size == self.input_dim, "bad number of variances, need one ARD variance per input_dim"
else:
variance = np.ones(self.input_dim)
self.variance = Param('variance', variance, Logexp())
self.link_parameter(self.variance)
self.psicomp = PSICOMP_Linear()
def to_dict(self):
input_dict = super(Linear, self)._save_to_input_dict()
input_dict["class"] = "GPy.kern.Linear"
input_dict["variance"] = self.variance.values.tolist()
input_dict["ARD"] = self.ARD
return input_dict
@staticmethod
def _build_from_input_dict(kernel_class, input_dict):
useGPU = input_dict.pop('useGPU', None)
return Linear(**input_dict)
@Cache_this(limit=3)
def K(self, X, X2=None):
if self.ARD:
if X2 is None:
return tdot(X*np.sqrt(self.variance))
else:
rv = np.sqrt(self.variance)
return np.dot(X*rv, (X2*rv).T)
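else:
# Non-ARD branch: a single shared variance scales the plain dot product.
# Minimal completion sketched from the docstring's kernel definition;
# treat it as an assumption, not the verbatim upstream implementation.
if X2 is None:
return self.variance * tdot(X)
return self.variance * np.dot(X, X2.T)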
import sys
import json
import pickle
import random
from pathlib import Path
import csv
import hashlib
import numpy as np
import open3d as o3d
from loguru import logger as logging
from tools.tests.ray_ground_filter import RayGroundFilter
from tools.tests.object_utils import Box, ObjectWithBox, ObjectManipulator, VisualizerSequence
# set seed for debug
seed = random.randrange(sys.maxsize)
# seed = 1000
random.seed(seed)
logging.info('Random seed: {}'.format(seed))
class SceneGenerator(object):
def __init__(self, cloud_data_folder, output_folder):
self.cloud_data_folder = cloud_data_folder
self.output_folder = output_folder
self.output_cloud_file = None # path to save output cloud .bin file
self.output_label_file = None # path to save output label .txt file
self.label_data_dict = None # label data dict of the original scene
self.scene_labels = None # label data dict of the generated scene
self.output_file_name = None
self.cloud = None # cloud as numpy ndarray type
self.pcd = None # cloud as Open3d type
self.scene_points = None # generated scene cloud as numpy ndarray type
self.point_distance_buffer = None
self.lidar_mask_buffer = None
self.selected_objects = list()
self.labels_of_objects = list()
self.labels_of_valid_objects = list()
self.object_manipulator = None
self.create_object_manipulator()
# num of each classes in a scene
self.num_of_objects = {'Car': 15, 'Truck': 5, 'Tricar': 5, 'Cyclist': 10, 'Pedestrian': 10}
# radial distance range of each classes in a scene, can be set as absolute or relative
# -- absolute
# self.range_of_distances = {'Car': [5.0, 100.0],
# 'Truck': [8.0, 120.0],
# 'Tricar': [5.0, 80.0],
# 'Cyclist': [5.0, 80.0],
# 'Pedestrian': [5.0, 60.0]}
# -- relative
self.range_of_distances = {'Car': [-10.0, 10.0],
'Truck': [-10.0, 10.0],
'Tricar': [-10.0, 10.0],
'Cyclist': [-10.0, 10.0],
'Pedestrian': [-10.0, 10.0]}
# additional random rotation angle range applied to each object
self.additional_rotation_range = 30.0 # deg
# elevation angle range set to each object to control its height
self.elevation_angle_range = 2.0 # deg
def create_object_manipulator(self):
# configure the object manipulator and the transform between the original lidar frame and current frame
origin_lidar_rotation = [3.13742, -3.1309, 3.14101]
origin_lidar_location = [-2.87509, -0.00462392, 1.83632]
self.object_manipulator = ObjectManipulator()
self.object_manipulator.init_lidar_transform(origin_lidar_rotation, origin_lidar_location)
# configure lidar elevation angle distribution
lidar_elevation_file = 'test_data/VLS-128-Figure9-8-Azimuth Offsets by Elevation.csv'
azimuth_angle_increment = 0.2 # deg
ring_index = list()
elevation_angle = list()
with open(lidar_elevation_file, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
line_num = 0
for row in csvreader:
if line_num > 0:
ring_index.append(int(row[0]))
elevation_angle.append(float(row[1]))
line_num += 1
self.object_manipulator.init_lidar_param(ring_index, elevation_angle, azimuth_angle_increment)
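# Expected CSV layout (an inference from the parsing above): one header row, then
# one row per laser channel of "ring index, elevation angle [deg], ..."; only the
# first two columns are consumed here.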
def remove_original_objects(self):
self.pcd = o3d.geometry.PointCloud()
self.pcd.points = o3d.utility.Vector3dVector(self.cloud[:, :3])
# -- iterate for each object
objs = self.label_data_dict['gts']
for p in objs:
# ignore DontCare objects
if p['class_name'] == 'DontCare':
continue
# construct 3d box
bbox = o3d.geometry.OrientedBoundingBox(
center=p['location'],
R=o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(p['rotation']),
extent=p['dimension'],
)
# crop the object points
object_points = self.pcd.crop(bbox)
# check if not empty
if np.asarray(object_points.points)
import math
import re
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from fibber import log
from fibber.metrics.bert_lm_utils import get_lm
from fibber.paraphrase_strategies.asrs_utils_text_parser import TextParser
from fibber.paraphrase_strategies.asrs_utils_wpe import get_wordpiece_emb
from fibber.paraphrase_strategies.strategy_base import StrategyBase
logger = log.setup_custom_logger(__name__)
POST_PROCESSING_PATTERN = [
(r"\s+n\s", "n "),
(r"\s*'\s*t\s", "'t "),
(r"\s*'\s*s\s", "'s "),
(r"\s*'\s*ve\s", "'ve "),
(r"\s*'\s*ll\s", "'ll "),
(r"\s*n't\s", "n't "),
(r"- -", "--"),
(r"\s*([\.,?!])", r"\1"),
(r"\s+-\s+", "-"),
]
PRE_PROCESSING_PATTERN = [
(r"can't\s", r" cannot "),
(r"won't\s", r" will not "),
(r"n't\s", r" not "),
(r"'ll\s", r" will "),
(r"'ve\s", r" have "),
]
AUTO_SENTENCE_LEN_THRESHOLD = 50 # 50 words
def process_text(text, patterns):
"""Processing the text using regex patterns.
Args:
text (str): the str to be post processed.
patterns (list): a list of substitution patterns.
"""
for pattern in patterns:
text = re.sub(pattern[0], pattern[1], text)
return text
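# Hedged example: process_text("he can't swim", PRE_PROCESSING_PATTERN) expands the
# contraction to "he  cannot swim"; the doubled space is harmless to a
# whitespace-splitting tokenizer.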
def tostring(tokenizer, seq):
"""Convert a sequence of word ids to a sentence. The post prossing is applied.
Args:
tokenizer (transformers.BertTokenizer): a BERT tokenizer.
seq (list): a list-like sequence of word ids.
"""
return process_text(tokenizer.decode(seq), POST_PROCESSING_PATTERN)
def sample_word_from_logits(logits, temperature=1., top_k=0):
"""Sample a word from a distribution.
Args:
logits (torch.Tensor): tensor of logits with size ``(batch_size, vocab_size)``.
temperature (float): the temperature of softmax. The PMF is
``softmax(logits/temperature)``.
top_k (int): if ``k>0``, only sample from the top k most probable words.
"""
logits = logits / temperature
if top_k > 0:
kth_vals, kth_idx = logits.topk(top_k, dim=-1)
dist = torch.distributions.categorical.Categorical(logits=kth_vals)
idx = kth_idx.gather(dim=1, index=dist.sample().unsqueeze(-1)).squeeze(-1)
else:
dist = torch.distributions.categorical.Categorical(logits=logits)
idx = dist.sample().squeeze(-1)
return idx
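# Hedged usage sketch (hypothetical logits; not part of the original module):
# draws one word id per row from the 50 most probable entries at temperature 0.9.
if __name__ == "__main__":
    _logits = torch.randn(4, 100)
    _ids = sample_word_from_logits(_logits, temperature=0.9, top_k=50)
    print(_ids.shape)  # torch.Size([4])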
def all_accept_criteria(candidate_ids, stats, **kargs):
"""Always accept proposed words.
Args:
candidate_ids (torch.Tensor): proposed word ids in this sampling step with
size ``(batch_size, pos_ed-pos_st)``.
stats (dict): a dict to keep track the accept rate.
Returns:
(np.array, None)
np.array is the same as candidate_ids.
None means this criterion does not have any state.
"""
stats["accept"] += len(candidate_ids)
stats["all"] += len(candidate_ids)
return candidate_ids, None
def sim_criteria_score(origin, paraphrases, sim_metric, sim_threshold, sim_weight):
"""Estimate the score of a sentence using USE.
Args:
origin (str): original sentence.
paraphrases ([str]): a list of paraphrase sentences.
sim_metric (MetricBase): a similarity metric object.
sim_threshold (float): the universal sentence encoder similarity threshold.
sim_weight (float): the weight parameter for the criteria.
Returns:
(np.array): a numpy array of size ``(batch_size,)``. All entries ``<=0``.
"""
if sim_weight == 0:
return np.zeros(len(paraphrases), dtype="float32")
use_semantic_similarity = sim_metric.measure_batch(origin, paraphrases)
return -sim_weight * (
np.maximum(sim_threshold - np.asarray(use_semantic_similarity)
import numpy as np
from scipy.optimize import minimize
from numpy.linalg import LinAlgError
from numpy.linalg import inv, cholesky
from numpy import log, sum, diagonal
class Regression:
"""
Fit kernel and mean hyperparameters by building a log-likelihood
function (including gradient data when present) and minimizing it.
"""
def __init__(self, kernel_regression_kwargs=None,
mean_regression_kwargs=None, optimized=False):
# self.kernel_regression=None or KernelRegression()
self.kernel_regression_kwargs = kernel_regression_kwargs or \
{"method": "BFGS"}
# self.mean_regression=None or MeanRegression()
self.mean_regression_kwargs = mean_regression_kwargs or {}
self.optimized = optimized
def __call__(self, *args, kernel=None, mean=None, **kwargs):
if mean is not None:
self.mean_regression(mean, kernel, *args, **kwargs)
if kernel is not None:
self.kernel_regression(kernel, mean, *args, **kwargs)
def kernel_regression(self, kernel, mean, *args, data=None, **kwargs):
"""
k : class Kernel; k(X, X) ((DxN + 1) x m) x ((DxN + 1) x n) array
X : imgdata['X']; position of atoms, (D x N) x m dimension array
Y : imgdata['Y']; energies and forces of the atoms. One-dimensional array
with length (m + m x (D x N)); the potential comes first.
m : mean function
M : the number of data points
"""
k, m, x0 = kernel, mean, kernel.get_hyperparameters()
likelihood = self.likelihood(kernel, mean, data)
res = minimize(likelihood, x0=x0, **self.kernel_regression_kwargs)
k.set_hyperparameters(res.x)
def mean_regression(self, mean, kernel, *args, data=None, **kwargs):
mean.set_hyperparameters(data=data)
def likelihood(self, kernel=None, mean=None, data=None):
k, m = kernel, mean
X = data['kernel']['X']
Y = data['kernel']['Y']
Y_m = Y - m(X)
def likelihood(hyperparameters):
k.set_hyperparameters(hyperparameters)
K = k(X, X, noise=True)
detK = np.linalg.det(K)
try:
detK = diagonal(cholesky(K))
log_detK = sum(log(detK))
except LinAlgError:
# K is not positive definite; fall back to a direct determinant
detK = np.linalg.det(K)
# print(detK)
if detK <= 1e-5:
log_detK = -5
else:
log_detK = log(detK)
return log_detK + 0.5 * (Y_m.T @ (inv(K) @ Y_m))
return likelihood
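# Note on the Cholesky branch above: for K = L L^T, det(K) = (prod_i L_ii)^2, so
# sum(log(diagonal(cholesky(K)))) equals 0.5 * log det K -- consistent with the
# 0.5 factor on the quadratic term in the returned negative log likelihood.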
def reg_kwargs(self, regression_method=None, hyperparameters=None,
hyperparameters_bounds=None):
if regression_method is None:
regression_method = self.regression_method
if hyperparameters is None:
hyperparameters = self.hyperparameters
no_boundary = False
if hyperparameters_bounds is None:
hyperparameters_bounds = self.hyperparameters_bounds
if hyperparameters_bounds == {}:
no_boundary = True
method = regression_method
number_of_hyperparameters = len(self.kernel.key2idx)
x0 = np.zeros(number_of_hyperparameters)
bounds = np.zeros((number_of_hyperparameters, 2))
for key, idx in self.kernel.key2idx.items():
x0[idx] = hyperparameters[key]
if not no_boundary:
bounds[idx] = hyperparameters_bounds[key]
if no_boundary:
bounds = None
return {'x0': x0, 'bounds': bounds, 'method': method}
class NonGradientRegression(Regression):
    def likelihood(self, kernel=None, mean=None, data=None):
        # Best-effort repair: the original method was missing its setup, so this
        # mirrors the closure pattern of Regression.likelihood but evaluates the
        # kernel on function values only (orig=True), without gradient data.
        k, m = kernel, mean
        X = data['kernel']['X']
        Y = data['kernel']['Y']
        Y_m = Y - m(X)
        def likelihood(hyperparameters_list):
            k.set_hyperparameters(hyperparameters_list)
            K = k(X, X, orig=True)
            try:
                detK = diagonal(cholesky(K))
                log_detK = sum(log(detK))
            except LinAlgError:
                # K is not positive definite; fall back to a direct determinant
                detK = np.linalg.det(K)
                if detK <= 1e-5:
                    log_detK = -5
                else:
                    log_detK = log(detK)
            return log_detK + 0.5 * (Y_m.T @ (inv(K) @ Y_m))
        return likelihood
class PseudoGradientRegression(Regression):
def calculate(self):
data = self.get_data(paths)  # NOTE: 'paths' is undefined in this snippet; presumably supplied by the surrounding code
D, M, P = data['X'].shape
_X = data['X']
_V = data['V']
_F = data['F']
X = np.zeros((D, M, (D * M + 1) * P))
Y = np.zeros((D * M + 1) * P)
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from ttt.models import Game, CPUAgent, HumanAgent, Action
@pytest.mark.unit
def test_game_initializes_correctly():
agent_1 = CPUAgent()
agent_2 = HumanAgent()
game = Game(agent_1, agent_2)
assert game.player_1 == agent_1
assert game.player_2 == agent_2
assert_array_equal(game.player_1.grid, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]))
assert_array_equal(game.player_2.grid, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]))
assert_array_equal(game.grid, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]))
assert_array_equal(game.game_sequence, np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0]]))
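# Grid encoding inferred from the cases below: cells are indexed 0-8 row-major,
# 0 = empty, 1 = player 1, 2 = player 2; game.result is -1 while undecided,
# 0 for a draw, or the winning player's id.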
@pytest.mark.unit
@pytest.mark.parametrize("move, player_id, expected_result", [
(Action.top_center, 1, np.array([0, 1, 0, 0, 0, 0, 0, 0, 0])),
(Action.bottom_right, 2, np.array([0, 0, 0, 0, 0, 0, 0, 0, 2])),
])
def test_game_applies_move_correctly(move, player_id, expected_result):
game = Game(CPUAgent(), CPUAgent())
game.apply_move(move, player_id)
assert_array_equal(game.grid, expected_result)
@pytest.mark.unit
@pytest.mark.parametrize("grid, move, player_id, expected_result", [
(np.array([1, 1, 0, 0, 0, 0, 0, 0, 0]), Action.top_right, 1, 1),
(np.array([2, 2, 0, 0, 0, 0, 0, 0, 0]), Action.top_right, 2, 2),
(np.array([1, 1, 0, 0, 0, 0, 0, 0, 0]), Action.middle_left, 1, -1),
(np.array([1, 0, 0, 1, 0, 0, 0, 0, 0]), Action.bottom_left, 1, 1),
(np.array([2, 0, 0, 2, 0, 0, 0, 0, 0]), Action.bottom_left, 2, 2),
(np.array([1, 0, 0, 1, 0, 0, 0, 0, 0]), Action.bottom_center, 1, -1),
(np.array([1, 0, 0, 0, 1, 0, 0, 0, 0]), Action.bottom_right, 1, 1),
(np.array([2, 0, 0, 0, 2, 0, 0, 0, 0]), Action.bottom_right, 2, 2),
(np.array([1, 0, 0, 0, 1, 0, 0, 0, 0]), Action.bottom_center, 1, -1)
])
def test_game_detects_victory_correctly(grid, move, player_id, expected_result):
game = Game(CPUAgent(), CPUAgent())
game.grid = grid
game.apply_move(move, player_id)
assert game.result == expected_result
@pytest.mark.unit
@pytest.mark.parametrize("grid, expected_grid_is_full, expected_result, force_win", [
(np.array([1, 2, 2, 2, 2, 1, 1, 1, 0]), False, -1, False),
(np.array([1, 2, 2, 2, 2, 1, 1, 1, 0]), False, 1, True),
(np.array([1, 2, 2, 2, 2, 1, 1, 1, 2]), True, 0, False),
(np.array([1, 2, 2, 2, 2, 1, 1, 1, 2]), True, 1, True),
])
def test_game_detects_grid_is_full_correctly(grid, expected_grid_is_full, expected_result, force_win):
game = Game(CPUAgent(), CPUAgent())
game.grid = grid
if force_win:
game.result = 1
game.check_if_grid_is_full()
assert game.grid_is_full == expected_grid_is_full
assert game.result == expected_result
@pytest.mark.unit
def test_game_traces_game_sequence_correctly():
game = Game(CPUAgent(), CPUAgent())
game.apply_move(Action.top_left, 1)
game.apply_move(Action.top_center, 2)
game.apply_move(Action.top_right, 2)
game.apply_move(Action.middle_left, 2)
game.apply_move(Action.middle_center, 2)
game.apply_move(Action.middle_right, 1)
game.apply_move(Action.bottom_left, 1)
game.apply_move(Action.bottom_center, 1)
game.apply_move(Action.bottom_right, 2)
expected_game_sequence = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 2, 2, 0, 0, 0, 0, 0, 0],
[1, 2, 2, 2, 0, 0, 0, 0, 0],
[1, 2, 2, 2, 2, 0, 0, 0, 0],
[1, 2, 2, 2, 2, 1, 0, 0, 0],
[1, 2, 2, 2, 2, 1, 1, 0, 0],
[1, 2, 2, 2, 2, 1, 1, 1, 0],
[1, 2, 2, 2, 2, 1, 1, 1, 2],
])
assert_array_equal(game.game_sequence, expected_game_sequence)
@pytest.mark.unit
@pytest.mark.parametrize("grid, expected_result", [
(np.array([1, 1, 1, 0, 0, 0, 0, 0, 0]), 1),
(np.array([2, 2, 2, 0, 0, 0, 0, 0, 0]), 2),
(np.array([0, 0, 0, 1, 1, 1, 0, 0, 0]), 1),
(np.array([0, 0, 0, 2, 2, 2, 0, 0, 0]), 2),
(np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]), 1),
(np.array([0, 0, 0, 0, 0, 0, 2, 2, 2]), 2),
(np.array([1, 0, 0, 1, 0, 0, 1, 0, 0]), -1)
])
def test_game_checks_horizontal_win_correctly(grid, expected_result):
game = Game(CPUAgent(), CPUAgent())
game.grid = grid
game.check_horizontal_win()
assert game.result == expected_result
@pytest.mark.unit
@pytest.mark.parametrize("grid, expected_result", [
(np.array([1, 0, 0, 1, 0, 0, 1, 0, 0]), 1),
(np.array([2, 0, 0, 2, 0, 0, 2, 0, 0]), 2),
(np.array([0, 1, 0, 0, 1, 0, 0, 1, 0]), 1),
(np.array([0, 2, 0, 0, 2, 0, 0, 2, 0]), 2),
(np.array([0, 0, 1, 0, 0, 1, 0, 0, 1]), 1),
(np.array([0, 0, 2, 0, 0, 2, 0, 0, 2]), 2),
(np.array([1, 1, 1, 0, 0, 0, 0, 0, 0]), -1)
])
def test_game_checks_vertical_win_correctly(grid, expected_result):
game = Game(CPUAgent(), CPUAgent())
game.grid = grid
game.check_vertical_win()
assert game.result == expected_result
@pytest.mark.unit
@pytest.mark.parametrize("grid, expected_result", [
(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]), 1),
(np.array([2, 0, 0, 0, 2, 0, 0, 0, 2]), 2),
(np.array([0, 0, 1, 0, 1, 0, 1, 0, 0]), 1),
(np.array([0, 0, 2, 0, 2, 0, 2, 0, 0]), 2),
(np.array([1, 1, 1, 0, 0, 0, 0, 0, 0])
from .MobileAgent import MobileAgent
import numpy as np
from cvxopt import solvers, matrix
from numpy.matlib import repmat
from numpy import zeros, eye, ones, sqrt, asscalar, log
class BarrierFunction(MobileAgent):
def __init__(self):
self.t = 0.5
self.gamma = 2
self.half_plane_ABC = []
self.d_min = 2
MobileAgent.__init__(self)
self.safe_set = [0,0,0]
def calc_control_input(self, dT, goal, fx, fu, Xr, Xh, dot_Xr, dot_Xh, Mr, Mh, p_Mr_p_Xr, p_Mh_p_Xh, u0, min_u, max_u):
dim = np.shape(Mr)
# -*- coding: utf-8 -*-
from numpy.lib.function_base import rot90
from scipy.spatial.distance import cdist
from sklearn.neighbors import KNeighborsClassifier
from sklearn import mixture
from collections import Counter
import json
import random
import numpy as np
from sklearn.metrics import euclidean_distances
import ot
import os
import joblib
from ot.optim import line_search_armijo
def norm_max(x):
for i in range(x.shape[1]):
tmax = x[:, i].max()
x[:, i] = x[:, i] / tmax
return x
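# Hedged note: norm_max rescales each feature column by its column maximum, in
# place; callers should pass float arrays, since an integer dtype would silently
# truncate the division on assignment.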
def load_from_file(root_dir, filename, ss, ts):
f1 = root_dir + filename
with open(f1, 'r') as f:
s = f.read()
data = json.loads(s)
xs, ys, xt, yt = np.array(data[ss]['x']), np.array(data[ss]['y']), np.array(data[ts]['x']), np.array(
data[ts]['y'])
xs = norm_max(xs)
xt = norm_max(xt)
ys = np.squeeze(ys)
yt = np.squeeze(yt)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from supra.Utils.Classes import Constants
consts = Constants()
def getPressure(z):
p = 10*101.325*np.exp(-0.00012*z)*100
# in Pa
return p
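# Quick check of the form above: at z = 0 it reduces to 10 * 101.325 * 100
# = 101325 Pa, i.e. standard sea-level pressure, decaying with a scale height
# of 1/0.00012 m (about 8.3 km).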
def anglescan(S, phi, theta, z_profile, vfreq, P_amb, wind=True, debug=True, trace=False, plot=False):
# Originally by <NAME> (Supracenter)
""" Ray-traces from a point given initial launch angles
Arguments:
S: [list] [x, y, z] of initial launch point (Supracenter or Wave-Release point)
phi: [float] initial azimuthal angle of launch [deg] with 0 deg being North and 90 deg being East
theta: [float] initial takeoff angle of launch [deg] with 90 deg being horizontal and 180 deg being vertically down
z_profile: [list] weather profile (n_layers * 4)
[[heights (increasing order) [m], speed of sound [m/s], wind speed [m/s], wind direction [rad] (same angle definition as phi)],
... ]
Keyword Arguments:
wind: [Boolean] if False sets all wind speeds to 0
debug: [Boolean] if True outputs print messages of program status
trace: [Boolean] if True returns (x, y, z, t) coordinates of the ray trace
plot: [Boolean] if True plots the ray trace
Returns:
D: [list] (x, y, z, t) final position and travel time of the raytrace
T: [list] returned if trace is set to True, (x, y, z, t) of all points along the ray-trace
"""
b_const = 1.119e-4
k_const = 2.0e-4
T = z_profile[-1, 1]
P = getPressure(z_profile[-1, 0])
# Azimuths and Wind directions are measured as angles from north, and increasing clockwise to the East
phi = (phi - 90)%360
# Flip coordinate system horizontally
phi = (360 - phi)%360
phi = np.radians(phi)
theta = np.radians(theta)
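# Net effect of the two phi remappings above: phi_math = (90 - phi_compass) mod 360,
# i.e. a compass azimuth (N = 0 deg, clockwise positive) becomes a standard math
# angle (x-axis = 0 deg, counter-clockwise positive) before the radian conversion.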
# Switch to turn off winds
if not wind:
z_profile[:, 2] = 0
# z_profile[:, 1] = 330
# The number of layers in the integration region
n_layers = len(z_profile)
# Slowness, as defined in SUPRACENTER on pg 35, s = 1/c
s = 1.0/z_profile[0:n_layers, 1]
# Elevation for that layer
z = z_profile[0:n_layers, 0]
# Component of wind vector in the direction of phi and phi + pi/2 respectively
u = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi)
v = z_profile[:, 2]*np.sin(z_profile[:, 3])
# -*- coding: utf-8 -*-
import numpy as np
def relu(x):
return np.maximum(0, x)
def relu_back(x,dout):
dx = np.array(dout, copy=True)
dx[x <= 0] = 0
return dx
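# Hedged sanity check (hypothetical arrays): relu_back zeroes the upstream
# gradient wherever the forward input was <= 0, e.g.
# relu_back(np.array([-1., 0., 2.]), np.ones(3)) -> array([0., 0., 1.]).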
def conv_forward_naive(_input,_filter,b,pad,stride):
(m, n_h, n_w, n_C_prev) = _input.shape
(f,f, n_C_prev, n_C) = _filter.shape
n_H = int(1 + (n_h + 2 * pad - f) / stride)
n_W = int(1 + (n_w + 2 * pad - f) / stride)
a_prev_pad = np.pad(_input, ((0,0),(pad,pad),(pad,pad),(0,0)), 'constant', constant_values=0)
Z = np.zeros((m, n_H,n_W,n_C))
for i in range(m):
for h in range(n_H):
for w in range(n_W):
for c in range(n_C):
vert_start = h*stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
a_slice = a_prev_pad[i,vert_start:vert_end,horiz_start:horiz_end,:]
Z[i,h,w,c] = np.sum(np.multiply(a_slice, _filter[:,:,:,c]))
return Z + b[None,None,None,:]
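# Hedged shape check (hypothetical sizes, not part of the original module): with
# pad=1, stride=1 and a 3x3 filter the naive convolution preserves spatial size.
if __name__ == "__main__":
    _x = np.random.randn(2, 5, 5, 3)
    _w = np.random.randn(3, 3, 3, 4)
    _b = np.zeros(4)
    print(conv_forward_naive(_x, _w, _b, pad=1, stride=1).shape)  # (2, 5, 5, 4)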
def conv_back_naive(_input,_filter,pad,stride,dout):
(m, n_h, n_w, n_C_prev) = _input.shape
(f,f, n_C_prev, n_C) = _filter.shape
n_H = int(1 + (n_h + 2 * pad - f) / stride)
n_W = int(1 + (n_w + 2 * pad - f) / stride)
a_prev_pad = np.pad(_input, ((0,0),(pad,pad),(pad,pad),(0,0)), 'constant', constant_values=0)
dw = np.zeros(_filter.shape,dtype=np.float32)
dx = np.zeros(_input.shape,dtype=np.float32)
for h in range(f):
for w in range(f):
for p in range(n_C_prev):
for c in range(n_C):
# go through all the individual positions that this filter affected and multiply by their dout
a_slice = a_prev_pad[:,h:h + n_H * stride:stride,w:w + n_W * stride:stride,p]
dw[h,w,p,c] = np.sum(a_slice * dout[:,:,:,c])
# TODO: put back in dout to get correct gradient
dx_pad = np.pad(dx, ((0,0),(pad,pad),(pad,pad),(0,0)), 'constant', constant_values=0)
for i in range(m):
for h_output in range(n_H):
for w_output in range(n_W):
for g in range(n_C):
vert_start = h_output*stride
vert_end = vert_start + f
horiz_start = w_output * stride
horiz_end = horiz_start + f
dx_pad[i,vert_start:vert_end,horiz_start:horiz_end,:] += _filter[:,:,:,g] * dout[i,h_output,w_output,g]
dx = dx_pad[:,pad:pad+n_h,pad:pad+n_w,:]
db = np.sum(dout,axis=(0,1,2))
return dw,dx,db
def max_pooling(prev_layer, filter_size=2,stride=2):
(m, n_H_prev, n_W_prev, channels) = prev_layer.shape
stride = 2
n_H = int((n_H_prev - filter_size)/stride + 1)
n_W = int((n_W_prev - filter_size)/stride + 1)
pooling = np.zeros((m,n_H,n_W,channels))
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
print('\nrequest completed in %f minutes.' % ((i * 3) / 60))  # computed inline; 'elapsed' was unset if the first poll succeeded
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the
list of data files from the THREDDS catalog created for the request.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: list of file URLs in the catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
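# Hedged usage sketch (hypothetical dates and tag; the stream name appears in the
# mappings further below):
# data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                 '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
# files = M2M_Files(data, tag='.*\\.nc$')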
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
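# Hedged usage sketch (not part of the original module): structtype grows by one
# var() when indexed exactly one past its current end, which is how M2M_URLs
# below fills var_list[0], var_list[1], ... in order.
if __name__ == "__main__":
    s = structtype()
    s[0].name = 'time'
    s[1].name = 'sea_surface_temperature'
    print(len(s))  # 2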
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
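# Surface Wave Spectra: bulk wave statistics from the buoy-mounted wave
# sensor. Indices 1-14 cover zero-crossing counts, wave heights (m), wave
# periods (sec), and mean direction/spread (degrees). Hypothetical
# downstream use of a filled branch, shown for illustration only:
#   hs = var_list[5].data    # significant_wave_height, m
#   tp = var_list[10].data   # peak_wave_period, sec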
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
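# Single Point Velocity Meter: east/north/up point velocities (m/s) plus
# attitude in deci-degrees, temperature in 0.01 degC, and pressure in the
# instrument's 0.001 dbar scaling, as recorded in the units strings below.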
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
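# pCO2 Water: thermistor temperature (degC) and the partial pressure of
# CO2 in seawater (uatm).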
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
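# Seawater pH: thermistor temperature (degC) and spectrophotometric pH,
# reported here with 'unitless' as the units string.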
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
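# Spectral Irradiance: the downwelling irradiance vector across the
# radiometer's wavelength channels, in uW cm-2 nm-1.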
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
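# Seafloor Pressure: absolute seafloor pressure (dbar) and seawater
# temperature (degC) from the tide-measurement streams.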
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
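# Pumped CTD: temperature (degC), practical salinity (unitless), density
# (kg/m3), pressure (dbar), and conductivity (S/m). Hypothetical sanity
# check on a filled branch, shown for illustration only:
#   assert var_list[3].units == 'kg/m3'   # density
#   rho = var_list[3].data                # seawater density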
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
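# 3-D Single Point Velocity Meter (seafloor frames): east/north/up
# turbulent velocities (m/s) plus instrument pressure.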
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
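# Series-K VEL3D on the CE09OSPM wire-following profiler: point velocities
# (m/s) plus heading/pitch/roll and the profiler CTD pressure (dbar).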
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
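# pCO2 Air-Sea: CO2 partial pressure in surface seawater and in air (both
# uatm), plus the derived air-sea CO2 flux (mol m-2 s-1).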
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
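# Photosynthetically Available Radiation from the profiler: PAR
# (umol photons m-2 s-1) paired with the co-located CTD pressure (dbar).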
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
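# Absorption Spectrophotometer. As with ZPLSC, only the time coordinate is
# mapped; the multi-wavelength absorption/attenuation spectra do not fit
# this flat variable template.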
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
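# Nitrate: raw and salinity-corrected nitrate concentrations (umol/L).
# Note the stream name ends in '_recovered' even on these telemetered
# branches; that appears to be the stream's registered name rather than a
# typo in the method.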
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
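#3-axis motion package on the surface buoys; only the time coordinate is mapped for these streams.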
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
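#Bulk meteorology package on the surface buoys: sea-surface temperature/conductivity and derived salinity,
#magnetically corrected winds, barometric pressure, humidity, long/shortwave irradiance, precipitation,
#derived minute heat-flux products, surface currents, and specific humidity.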
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
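#Three-channel fluorometer: seawater scattering coefficient, chlorophyll-a, CDOM,
#total volume scattering, and optical backscatter.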
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
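#Direct-covariance flux package (CE02SHSM buoy only); only the time coordinate is mapped here.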
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
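#Dissolved-oxygen optodes; the NSIF entries expose the optode temperature and the
#estimated/temperature-corrected oxygen, while the MFN entries carry only the two oxygen products.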
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
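#Acoustic Doppler current profilers on the NSIFs and seafloor MFNs; earth-referenced
#velocities with bin depths and instrument attitude (heading/pitch/roll).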
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
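#Surface-wave statistics from the buoy-mounted wave sensor: zero-crossing counts,
#wave heights and periods, and mean direction/spread.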
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
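#Single-point velocity meters; velocities in m/s plus attitude (deci-degrees),
#temperature (0.01 degC), and pressure in raw instrument units.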
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
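#Note: this buoy entry is mapped to the RID16 (NSIF) stream; the dcl-parsed variant is left commented out above.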
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
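#Seawater pCO2 sensors; thermistor temperature (degC) and seawater pCO2 (uatm).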
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - CTD, pumped (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D single-point velocity (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - air-sea pCO2 (RecoveredHost)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - optical absorption/attenuation (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate, SUNA (RecoveredHost)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
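#CTDBP - CTD, pumped (RecoveredInst)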
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
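#CTDPF - profiler CTD (RecoveredWFP)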
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
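#ADCP - acoustic Doppler current profiler (RecoveredInst)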
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
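#ZPLSC - bio-acoustic sonar (RecoveredInst)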
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
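#VELPT - single-point velocity (RecoveredInst)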
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
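#NSIF VELPT RecoveredInst Data below (near-surface instrument frame, RID nodes)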
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
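#VEL3D Data below: profiler VEL3D-K first, then seafloor (MFN) VEL3D-CD turbulent velocities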
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
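#MFN PRESF RecoveredInst Data below (seafloor tide pressure and temperature)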
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
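#PHSEN RecoveredInst Data below (seawater pH): NSIF nodes first, then MFN nodes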
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
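#PCO2W RecoveredInst Data below (seawater pCO2)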
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
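#Profiler PARAD RecoveredWFP Data below (photosynthetically available radiation)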
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
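#NSIF NUTNR RecoveredInst Data below (SUNA nitrate)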
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
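#BUOY FDCHP RecoveredInst Data below (direct covariance flux package); only the time base is carried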
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
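#FLORT Data below (chlorophyll, CDOM, and optical backscatter fluorometer)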
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
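#Dissolved oxygen Data below: DOFST-K on the profiler, then DOSTA from the CTDBP recovered streams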
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
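#Wave statistics Data below, served from the ADCPT-M log9 recovered stream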
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
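#Cabled benthic experiment package (BEP) streamed Data below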
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
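#Streamed BEP DOSTA oxygen comes from the same ctdbp_no_sample stream as the CTD above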
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface Piercing Profiler) Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
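# CSPP profiler SPKIR (downwelling spectral irradiance) streams for CE01ISSP
# and CE06ISSP.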
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
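# CSPP profiler VELPT (point velocity) streams; each block carries the three
# velocity components plus heading/roll/pitch, temperature, and CTD pressure.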
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
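# CSPP profiler OPTAA recovered_cspp streams; only time and the interpolated
# CTD pressure are requested in these blocks.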
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
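# Shelf surface-piercing profilers (CE02SHSP and CE07SHSP), recovered_cspp
# streams: FLORT (fluorometer / optical backscatter).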
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
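# Shelf CSPP DOSTA (dissolved oxygen optode) streams.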
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
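# Shelf CSPP CTD streams.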
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
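# Shelf CSPP PARAD streams.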
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
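# Shelf CSPP NUTNR (nitrate) streams.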
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
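# Shelf CSPP SPKIR streams.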
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
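# Shelf CSPP VELPT streams.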
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
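# Shelf CSPP OPTAA streams (time and interpolated CTD pressure only).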
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
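# Coastal Endurance glider datasets. The UI platform names 'CEGL###' resolve to
# 'CE05MOAS/GL###' reference designators in the uframe path; telemetered and
# recovered_host CTD streams share one variable layout (ctdgv science variables
# plus glider lat/lon) and differ only in the delivery method, e.g.
#   platform_name='CEGL386', node='GLIDER', instrument_class='CTD',
#   method='Telemetered'
#     -> 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'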
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
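# Glider DOSTA (oxygen optode) streams; same layout as the glider CTD blocks,
# with optode oxygen concentrations and interpolated CTD pressure in place of
# the ctdgv science variables.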
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
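# FLORT (flbbcd fluorometer) branches for the CE05MOAS gliders. Each branch
# maps nine variables: time, seawater scattering coefficient (m-1),
# chlorophyll (ug/L), CDOM (ppb), volume scattering (m-1 sr-1), optical
# backscatter (m-1), interpolated CTD pressure, and GPS lat/lon.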
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
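# PARAD (photosynthetically active radiation) branches for the CE05MOAS
# gliders: PAR (umol photons m-2 s-1), interpolated CTD pressure, and GPS
# lat/lon.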
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
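# ADCP (acoustic Doppler current profiler) branches for the CE05MOAS
# gliders; only recovered_host streams are defined for this instrument.
# Each branch maps eleven variables: time, bin depths, heading/pitch/roll
# (deci-degrees), eastward/northward/upward seawater velocity (m/s),
# interpolated CTD pressure, and GPS lat/lon.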
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
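# METBK bulk meteorology, hourly flux products (metbk_hourly stream):
# hourly-averaged air-sea fluxes (latent, sensible, net longwave, rain,
# momentum) plus near-surface state (2 m humidity and air temperature,
# skin temperature, 10 m wind), per the units assigned below.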
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
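# WAVSS surface wave spectra, mean directional streams: telemetered and
# recovered-host branches for each Coastal Endurance surface mooring
# (CE02SHSM, CE04OSSM, CE09OSSM, CE07SHSM).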
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
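# WAVSS surface wave spectra, non-directional streams: band count, initial
# frequency, frequency spacing, and the non-directional PSD.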
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
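# WAVSS buoy motion streams: heave/north/east displacement offset arrays
# plus magnetically corrected x/y buoy motion time series.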
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
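# WAVSS directional Fourier coefficient streams.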
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
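#CE04OSPS shallow profiler and CE04OSPD deep profiler data streams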
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
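#dissolved oxygen on this profiler is served through the SBE43 sensor on the co-located CTD stream, so the CTD dataset name is reused here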
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
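#note: the recovered_wfp FLORT data are served through the co-located FLCDR (CDOM) instrument stream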
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
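#CE04OSPS 200m platform (PC01B) data streams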
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
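#the DOSTA product at this node is served through the co-located CTD optode stream, so the CTD dataset name is used instead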
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure'  # also used for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS wave statistics, mean directional, non-directional, motion, and Fourier data streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (telemetered)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (recovered host)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
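#FDCHP streams are mapped here with only the time coordinate; flux data products are not populated in this lookup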
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
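#METBK hourly flux products (metbk_hourly)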
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
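# METBK2-hr: the same hourly flux product set from the second METBK package
# on CP01CNSM (SBD12 port).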
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
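# CTDBP branches on the near-surface instrument frames (NSIF, RID27 ports).
# Telemetered and recovered_host streams use the 'temp'/'pressure'/
# 'conductivity' names; recovered_inst streams use the 'ctdbp_seawater_*'
# names instead.
#
# A minimal sketch of how a caller might index the populated metadata by
# variable name (assuming each var_list entry exposes the .name/.data/.units
# attributes set in these branches):
#
#     meta = {v.name: (v.units, v.data) for v in var_list if v.name}
#     # e.g. meta['time'] -> ('seconds since 1900-01-01', np.array([]))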
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
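# CTDBP branches on the seafloor multi-function nodes (MFN, MFD37 ports);
# same variable pattern as the NSIF branches above.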
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
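# OPTAA (absorption spectrophotometer) branches: only the time coordinate is
# requested here, for all three moorings and both nodes.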
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
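# VELPT (single-point velocity meter) branches on the NSIF (RID26 port):
# velocity components in m/s, with heading/roll/pitch in deci-degrees and
# temperature/pressure in scaled units (0.01 degC, 0.001 dbar).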
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
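# FLORT (three-wavelength fluorometer) branches: chlorophyll-a, CDOM, and
# optical backscatter from the flort_sample stream.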
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
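# SPKIR (spectral irradiance) branches: time plus the multichannel
# downwelling irradiance vector.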
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
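# DOSTA (dissolved oxygen optode) branches: processed oxygen in umol/kg plus
# the optode's own concentration and temperature readings.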
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
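# PHSEN (seawater pH) branches, first on the NSIF (RID26) and then on the
# MFN (MFD35): thermistor temperature and computed pH (unitless).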
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
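# PCO2W: seafloor partial-pressure-of-CO2 (pCO2) sensors on the multi-function nodes (MFN)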
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
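# PRESF: seafloor pressure (tide) recorders on the MFN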
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
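# VELPT: single-point velocity meters (velocity, attitude, temperature, and pressure) on the MFN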
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
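# DOSTA: dissolved-oxygen optodes on the MFN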
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
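# ZPLSC: bio-acoustic sonar (zooplankton); only the time coordinate is requested for these streams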
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
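# ADCP: bottom-mounted acoustic Doppler current profilers reporting earth-coordinate velocities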
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
# Coastal Pioneer Wire-Following Profilers (WFP)
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
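# CP01CNPM wire-following profiler (same instrument suite as CP04OSPM above)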
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
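# CP02PMCI wire-following profiler (same instrument suite as the profilers above)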
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
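# Hypothetical usage sketch (illustrative only): this elif chain is assumed to
# live inside a helper that maps (platform_name, node, instrument_class, method)
# to a uFrame stream path plus an empty var_list template, e.g.:
#   name, varlist = get_uframe_dataset('CP04OSPM', 'PROFILER', 'CTD', 'RecoveredWFP')
#   # name -> 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
# The function name and signature here are assumptions, not part of this file.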