import sys
from operator import itemgetter
import cv2
import matplotlib.pyplot as plt
import numpy as np
# -----------------------------#
#   Compute the scale factor for
#   each resize of the input image
# -----------------------------#
def calculateScales(img):
pr_scale = 1.0
h, w, _ = img.shape
    # --------------------------------------------#
    #   Pin the overall image size:
    #   if the shorter side exceeds 500, scale it down so the shorter side is 500;
    #   if the longer side is under 500, scale it up so the longer side is 500.
    # --------------------------------------------#
if min(w, h) > 500:
pr_scale = 500.0 / min(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
elif max(w, h) < 500:
pr_scale = 500.0 / max(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
    # ------------------------------------------------#
    #   Build the image-pyramid scales, stopping before
    #   the image width/height would drop below 12
    # ------------------------------------------------#
scales = []
factor = 0.709
factor_count = 0
minl = min(h, w)
while minl >= 12:
scales.append(pr_scale * pow(factor, factor_count))
minl *= factor
factor_count += 1
return scales
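# Example (illustrative sketch; the image path is hypothetical):
#
#   img = cv2.imread("face.jpg")
#   scales = calculateScales(img)
#   pyramid = [cv2.resize(img, (int(img.shape[1] * s), int(img.shape[0] * s)))
#              for s in scales]  # one resized copy per pyramid level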
# -----------------------------#
#   Convert rectangles to squares
# -----------------------------#
def rect2square(rectangles):
w = rectangles[:, 2] - rectangles[:, 0]
h = rectangles[:, 3] - rectangles[:, 1]
l = np.maximum(w, h).T
rectangles[:, 0] = rectangles[:, 0] + w * 0.5 - l * 0.5
rectangles[:, 1] = rectangles[:, 1] + h * 0.5 - l * 0.5
    rectangles[:, 2:4] = rectangles[:, 0:2] + np.repeat([l], 2, axis=0).T
    return rectangles
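# Example (illustrative sketch with one dummy [x1, y1, x2, y2, score] box):
#
#   boxes = np.array([[10., 20., 50., 100., 0.9]])
#   squares = rect2square(boxes)  # the 40x80 box becomes an 80x80 square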
"""
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import itertools
import numbers
import warnings
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.utils import check_array
from .direct_lingam import DirectLiNGAM
from .hsic import hsic_test_gamma
from .utils import predict_adaptive_lasso, find_all_paths
class LongitudinalLiNGAM:
"""Implementation of Longitudinal LiNGAM algorithm [1]_
References
----------
.. [1] <NAME>, <NAME>, and <NAME>. Estimation of causal structures
in longitudinal data using non-Gaussianity. In Proc. 23rd IEEE International
Workshop on Machine Learning for Signal Processing (MLSP2013), pp. 1--6, Southampton, United Kingdom, 2013.
"""
def __init__(self, n_lags=1, measure="pwling", random_state=None):
"""Construct a model.
Parameters
----------
n_lags : int, optional (default=1)
Number of lags.
measure : {'pwling', 'kernel'}, default='pwling'
Measure to evaluate independence : 'pwling' or 'kernel'.
random_state : int, optional (default=None)
``random_state`` is the seed used by the random number generator.
"""
self._n_lags = n_lags
self._measure = measure
self._random_state = random_state
self._causal_orders = None
self._adjacency_matrices = None
def fit(self, X_list):
"""Fit the model to datasets.
Parameters
----------
X_list : list, shape [X, ...]
            Longitudinal multiple datasets for training, where ``X`` is a dataset.
The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
# Check parameters
if not isinstance(X_list, (list, np.ndarray)):
raise ValueError("X_list must be a array-like.")
if len(X_list) < 2:
raise ValueError("X_list must be a list containing at least two items")
self._T = len(X_list)
self._n = check_array(X_list[0]).shape[0]
self._p = check_array(X_list[0]).shape[1]
X_t = []
for X in X_list:
X = check_array(X)
if X.shape != (self._n, self._p):
raise ValueError("X_list must be a list with the same shape")
X_t.append(X.T)
M_tau, N_t = self._compute_residuals(X_t)
B_t, causal_orders = self._estimate_instantaneous_effects(N_t)
B_tau = self._estimate_lagged_effects(B_t, M_tau)
# output B(t,t), B(t,t-τ)
self._adjacency_matrices = np.empty(
(self._T, 1 + self._n_lags, self._p, self._p)
)
self._adjacency_matrices[:, :] = np.nan
for t in range(1, self._T):
self._adjacency_matrices[t, 0] = B_t[t]
for l in range(self._n_lags):
if t - l != 0:
self._adjacency_matrices[t, l + 1] = B_tau[t, l]
self._residuals = np.zeros((self._T, self._n, self._p))
for t in range(self._T):
self._residuals[t] = N_t[t].T
self._causal_orders = causal_orders
return self
def bootstrap(self, X_list, n_sampling, start_from_t=1):
"""Evaluate the statistical reliability of DAG based on the bootstrapping.
Parameters
----------
        X_list : array-like, shape (X, ...)
            Longitudinal multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
            where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
        n_sampling : int
            Number of bootstrapping samples.
        start_from_t : int, optional (default=1)
            The first timepoint from which total effects are computed.
Returns
-------
results : array-like, shape (BootstrapResult, ...)
Returns the results of bootstrapping for multiple datasets.
"""
# Check parameters
if not isinstance(X_list, (list, np.ndarray)):
raise ValueError("X_list must be a array-like.")
if len(X_list) < 2:
raise ValueError("X_list must be a list containing at least two items")
self._T = len(X_list)
self._n = check_array(X_list[0]).shape[0]
self._p = check_array(X_list[0]).shape[1]
X_t = []
for X in X_list:
X = check_array(X)
if X.shape != (self._n, self._p):
raise ValueError("X_list must be a list with the same shape")
X_t.append(X)
# Bootstrapping
adjacency_matrices = np.zeros(
(n_sampling, self._T, 1 + self._n_lags, self._p, self._p)
)
total_effects = np.zeros((n_sampling, self._T * self._p, self._T * self._p))
for i in range(n_sampling):
resampled_X_t = np.empty((self._T, self._n, self._p))
indices = np.random.randint(0, self._n, size=(self._n,))
for t in range(self._T):
resampled_X_t[t] = X_t[t][indices, :]
self.fit(resampled_X_t)
adjacency_matrices[i] = self._adjacency_matrices
# Calculate total effects
for from_t in range(start_from_t, self._T):
for c, from_ in enumerate(self._causal_orders[from_t]):
to_t = from_t
for to in self._causal_orders[from_t][c + 1 :]:
total_effects[
i, to_t * self._p + to, from_t * self._p + from_
] = self.estimate_total_effect(X_t, from_t, from_, to_t, to)
for to_t in range(from_t + 1, self._T):
for to in self._causal_orders[to_t]:
total_effects[
i, to_t * self._p + to, from_t * self._p + from_
] = self.estimate_total_effect(X_t, from_t, from_, to_t, to)
return LongitudinalBootstrapResult(self._T, adjacency_matrices, total_effects)
def estimate_total_effect(self, X_t, from_t, from_index, to_t, to_index):
"""Estimate total effect using causal model.
Parameters
----------
        X_t : array-like, shape (n_timepoints, n_samples, n_features)
            Original data, where ``n_samples`` is the number of samples
            and ``n_features`` is the number of features.
        from_t : int
            The timepoint of the source variable.
        from_index : int
            Index of the source variable to estimate the total effect from.
        to_t : int
            The timepoint of the destination variable.
        to_index : int
            Index of the destination variable to estimate the total effect on.
Returns
-------
total_effect : float
Estimated total effect.
"""
# Check from/to causal order
if to_t == from_t:
from_order = self._causal_orders[to_t].index(from_index)
to_order = self._causal_orders[from_t].index(to_index)
if from_order > to_order:
warnings.warn(
f"The estimated causal effect may be incorrect because "
f"the causal order of the destination variable (to_t={to_t}, to_index={to_index}) "
f"is earlier than the source variable (from_t={from_t}, from_index={from_index})."
)
elif to_t < from_t:
warnings.warn(
f"The estimated causal effect may be incorrect because "
f"the causal order of the destination variable (to_t={to_t}) "
f"is earlier than the source variable (from_t={from_t})."
)
# X + lagged X
# n_features * (to + from + n_lags)
X_joined = np.zeros((self._n, self._p * (2 + self._n_lags)))
X_joined[:, 0 : self._p] = X_t[to_t]
for tau in range(1 + self._n_lags):
pos = self._p + self._p * tau
X_joined[:, pos : pos + self._p] = X_t[from_t - tau]
am = np.concatenate([*self._adjacency_matrices[from_t]], axis=1)
# from_index + parents indices
parents = np.where(np.abs(am[from_index]) > 0)[0]
predictors = [from_index + self._p]
predictors.extend(parents + self._p)
# Estimate total effect
coefs = predict_adaptive_lasso(X_joined, predictors, to_index)
return coefs[0]
def get_error_independence_p_values(self):
"""Calculate the p-value matrix of independence between error variables.
Returns
-------
independence_p_values : array-like, shape (n_features, n_features)
p-value matrix of independence between error variables.
"""
E_list = np.empty((self._T, self._n, self._p))
for t, resid in enumerate(self.residuals_):
B_t = self._adjacency_matrices[t, 0]
E_list[t] = np.dot(np.eye(B_t.shape[0]) - B_t, resid.T).T
p_values_list = np.zeros([self._T, self._p, self._p])
p_values_list[:, :, :] = np.nan
for t in range(1, self._T):
p_values = np.zeros([self._p, self._p])
for i, j in itertools.combinations(range(self._p), 2):
_, p_value = hsic_test_gamma(
np.reshape(E_list[t][:, i], [self._n, 1]),
np.reshape(E_list[t][:, j], [self._n, 1]),
)
p_values[i, j] = p_value
p_values[j, i] = p_value
p_values_list[t] = p_values
return p_values_list
def _compute_residuals(self, X_t):
"""Compute residuals N(t)"""
M_tau = np.zeros((self._T, self._n_lags, self._p, self._p))
N_t = np.zeros((self._T, self._p, self._n))
N_t[:, :, :] = np.nan
for t in range(1, self._T):
# predictors
X_predictors = np.zeros((self._n, self._p * (1 + self._n_lags)))
for tau in range(self._n_lags):
pos = self._p * tau
X_predictors[:, pos : pos + self._p] = X_t[t - (tau + 1)].T
# estimate M(t,t-τ) by regression
X_target = X_t[t].T
for i in range(self._p):
reg = LinearRegression()
reg.fit(X_predictors, X_target[:, i])
for tau in range(self._n_lags):
pos = self._p * tau
M_tau[t, tau, i] = reg.coef_[pos : pos + self._p]
# Compute N(t)
N_t[t] = X_t[t]
for tau in range(self._n_lags):
N_t[t] = N_t[t] - np.dot(M_tau[t, tau], X_t[t - (tau + 1)])
return M_tau, N_t
def _estimate_instantaneous_effects(self, N_t):
"""Estimate instantaneous effects B(t,t) by applying LiNGAM"""
causal_orders = [[np.nan] * self._p]
B_t = np.zeros((self._T, self._p, self._p))
for t in range(1, self._T):
model = DirectLiNGAM(measure=self._measure)
model.fit(N_t[t].T)
causal_orders.append(model.causal_order_)
B_t[t] = model.adjacency_matrix_
return B_t, causal_orders
def _estimate_lagged_effects(self, B_t, M_tau):
"""Estimate lagged effects B(t,t-τ)"""
B_tau = np.zeros((self._T, self._n_lags, self._p, self._p))
for t in range(self._T):
for tau in range(self._n_lags):
B_tau[t, tau] = np.dot(np.eye(self._p) - B_t[t], M_tau[t, tau])
return B_tau
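    # Note on the identity above: the structural model is
    #   x(t) = B(t,t) x(t) + sum_tau B(t,t-tau) x(t-tau) + e(t),
    # while the regression in _compute_residuals estimates
    #   x(t) = sum_tau M(t,t-tau) x(t-tau) + N(t).
    # Equating the two gives B(t,t-tau) = (I - B(t,t)) M(t,t-tau), which is
    # exactly the product computed in _estimate_lagged_effects.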
@property
def causal_orders_(self):
"""Estimated causal ordering.
Returns
-------
causal_order_ : array-like, shape (causal_order, ...)
The causal order of fitted models for B(t,t).
The shape of causal_order is (n_features),
where ``n_features`` is the number of features.
"""
return self._causal_orders
@property
def adjacency_matrices_(self):
"""Estimated adjacency matrices.
Returns
-------
adjacency_matrices_ : array-like, shape ((B(t,t), B(t,t-1), ..., B(t,t-τ)), ...)
The list of adjacency matrix B(t,t) and B(t,t-τ) for longitudinal datasets.
The shape of B(t,t) and B(t,t-τ) is (n_features, n_features), where
``n_features`` is the number of features.
**If the previous data required for the calculation are not available,
such as B(t,t) or B(t,t-τ) at t=0, all elements of the matrix are nan**.
"""
return self._adjacency_matrices
@property
def residuals_(self):
"""Residuals of regression.
Returns
-------
residuals_ : list, shape [E, ...]
            Residuals of regression, where ``E`` is a dataset.
The shape of ``E`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
"""
return self._residuals
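# Example usage (a minimal sketch; X0, X1, X2 stand for hypothetical datasets of
# identical shape (n_samples, n_features), one per timepoint):
#
#   X0, X1, X2 = (np.random.randn(100, 3) for _ in range(3))
#   model = LongitudinalLiNGAM(n_lags=1).fit([X0, X1, X2])
#   B_tt = model.adjacency_matrices_[1, 0]   # instantaneous effects B(1,1)
#   B_lag = model.adjacency_matrices_[1, 1]  # lagged effects B(1,0)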
class LongitudinalBootstrapResult(object):
"""The result of bootstrapping for LongitudinalLiNGAM."""
def __init__(self, n_timepoints, adjacency_matrices, total_effects):
"""Construct a BootstrapResult.
Parameters
----------
adjacency_matrices : array-like, shape (n_sampling)
The adjacency matrix list by bootstrapping.
total_effects : array-like, shape (n_sampling)
The total effects list by bootstrapping.
"""
self._n_timepoints = n_timepoints
self._adjacency_matrices = adjacency_matrices
self._total_effects = total_effects
@property
def adjacency_matrices_(self):
"""The adjacency matrix list by bootstrapping.
Returns
-------
adjacency_matrices_ : array-like, shape (n_sampling)
The adjacency matrix list, where ``n_sampling`` is
the number of bootstrap sampling.
"""
return self._adjacency_matrices
@property
def total_effects_(self):
"""The total effect list by bootstrapping.
Returns
-------
total_effects_ : array-like, shape (n_sampling)
The total effect list, where ``n_sampling`` is
the number of bootstrap sampling.
"""
return self._total_effects
def get_causal_direction_counts(
self,
n_directions=None,
min_causal_effect=None,
split_by_causal_effect_sign=False,
):
"""Get causal direction count as a result of bootstrapping.
Parameters
----------
n_directions : int, optional (default=None)
            If int, then the top ``n_directions`` items are included in the result.
min_causal_effect : float, optional (default=None)
Threshold for detecting causal direction.
If float, then causal directions with absolute values of causal
effects less than ``min_causal_effect`` are excluded.
split_by_causal_effect_sign : boolean, optional (default=False)
If True, then causal directions are split depending on the sign of the causal effect.
Returns
-------
causal_direction_counts : dict
List of causal directions sorted by count in descending order.
The dictionary has the following format::
{'from': [n_directions], 'to': [n_directions], 'count': [n_directions]}
where ``n_directions`` is the number of causal directions.
"""
# Check parameters
if isinstance(n_directions, (numbers.Integral, np.integer)):
if not 0 < n_directions:
raise ValueError("n_directions must be an integer greater than 0")
elif n_directions is None:
pass
else:
raise ValueError("n_directions must be an integer greater than 0")
if min_causal_effect is None:
min_causal_effect = 0.0
else:
if not 0.0 < min_causal_effect:
raise ValueError("min_causal_effect must be an value greater than 0.")
# Count causal directions
cdc_list = []
for t in range(self._n_timepoints):
directions = []
for m in self._adjacency_matrices:
am = np.concatenate([*m[t]], axis=1)
direction = np.array(np.where(np.abs(am) > min_causal_effect))
if split_by_causal_effect_sign:
signs = (
np.array([np.sign(am[i][j]) for i, j in direction.T])
.astype("int64")
.T
)
direction = np.vstack([direction, signs])
directions.append(direction.T)
directions = np.concatenate(directions)
if len(directions) == 0:
cdc = {"from": [], "to": [], "count": []}
if split_by_causal_effect_sign:
cdc["sign"] = []
cdc_list.append(cdc)
continue
directions, counts = np.unique(directions, axis=0, return_counts=True)
sort_order = np.argsort(-counts)
sort_order = (
sort_order[:n_directions] if n_directions is not None else sort_order
)
counts = counts[sort_order]
directions = directions[sort_order]
cdc = {
"from": directions[:, 1].tolist(),
"to": directions[:, 0].tolist(),
"count": counts.tolist(),
}
if split_by_causal_effect_sign:
cdc["sign"] = directions[:, 2].tolist()
cdc_list.append(cdc)
return cdc_list
def get_directed_acyclic_graph_counts(
self, n_dags=None, min_causal_effect=None, split_by_causal_effect_sign=False
):
"""Get DAGs count as a result of bootstrapping.
Parameters
----------
n_dags : int, optional (default=None)
            If int, then the top ``n_dags`` items are included in the result.
min_causal_effect : float, optional (default=None)
Threshold for detecting causal direction.
If float, then causal directions with absolute values of causal effects less than
``min_causal_effect`` are excluded.
split_by_causal_effect_sign : boolean, optional (default=False)
If True, then causal directions are split depending on the sign of the causal effect.
Returns
-------
directed_acyclic_graph_counts : dict
List of directed acyclic graphs sorted by count in descending order.
The dictionary has the following format::
{'dag': [n_dags], 'count': [n_dags]}.
where ``n_dags`` is the number of directed acyclic graphs.
"""
# Check parameters
if isinstance(n_dags, (numbers.Integral, np.integer)):
if not 0 < n_dags:
raise ValueError("n_dags must be an integer greater than 0")
elif n_dags is None:
pass
else:
raise ValueError("n_dags must be an integer greater than 0")
if min_causal_effect is None:
min_causal_effect = 0.0
else:
if not 0.0 < min_causal_effect:
raise ValueError("min_causal_effect must be an value greater than 0.")
# Count directed acyclic graphs
dagc_list = []
for t in range(self._n_timepoints):
dags = []
for m in self._adjacency_matrices:
am = np.concatenate([*m[t]], axis=1)
dag = np.abs(am) > min_causal_effect
if split_by_causal_effect_sign:
direction = np.array(np.where(dag))
signs = np.zeros_like(dag).astype("int64")
for i, j in direction.T:
signs[i][j] = np.sign(am[i][j]).astype("int64")
dag = signs
dags.append(dag)
dags, counts = np.unique(dags, axis=0, return_counts=True)
sort_order = np.argsort(-counts)
sort_order = sort_order[:n_dags] if n_dags is not None else sort_order
counts = counts[sort_order]
dags = dags[sort_order]
if split_by_causal_effect_sign:
dags = [
{
"from": np.where(dag)[1].tolist(),
"to": np.where(dag)[0].tolist(),
"sign": [dag[i][j] for i, j in np.array(np.where(dag)).T],
}
for dag in dags
]
else:
dags = [
{"from": np.where(dag)[1].tolist(), "to": np.where(dag)[0].tolist()}
for dag in dags
]
dagc_list.append({"dag": dags, "count": counts.tolist()})
return dagc_list
def get_probabilities(self, min_causal_effect=None):
"""Get bootstrap probability.
Parameters
----------
min_causal_effect : float, optional (default=None)
Threshold for detecting causal direction.
If float, then causal directions with absolute values of causal effects less than
``min_causal_effect`` are excluded.
Returns
-------
probabilities : array-like
List of bootstrap probability matrix.
"""
# check parameters
if min_causal_effect is None:
min_causal_effect = 0.0
else:
if not 0.0 < min_causal_effect:
raise ValueError("min_causal_effect must be an value greater than 0.")
prob = np.zeros(self._adjacency_matrices[0].shape)
for adj_mat in self._adjacency_matrices:
prob += np.where(np.abs(adj_mat) > min_causal_effect, 1, 0)
prob = prob / len(self._adjacency_matrices)
return prob
def get_total_causal_effects(self, min_causal_effect=None):
"""Get total effects list.
Parameters
----------
min_causal_effect : float, optional (default=None)
Threshold for detecting causal direction.
If float, then causal directions with absolute values of causal effects less than
``min_causal_effect`` are excluded.
Returns
-------
total_causal_effects : dict
List of bootstrap total causal effect sorted by probability in descending order.
The dictionary has the following format::
{'from': [n_directions], 'to': [n_directions], 'effect': [n_directions], 'probability': [n_directions]}
where ``n_directions`` is the number of causal directions.
"""
# Check parameters
if min_causal_effect is None:
min_causal_effect = 0.0
else:
if not 0.0 < min_causal_effect:
raise ValueError("min_causal_effect must be an value greater than 0.")
# probability
probs = np.sum(
np.where(np.abs(self._total_effects) > min_causal_effect, 1, 0),
axis=0,
keepdims=True,
)[0]
probs = probs / len(self._total_effects)
# causal directions
        dirs = np.array(np.where(np.abs(probs) > 0))
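# Example usage (a minimal sketch, continuing from the fitted model sketched
# above; the 0.01 threshold is an arbitrary illustrative value):
#
#   result = model.bootstrap([X0, X1, X2], n_sampling=100)
#   cdc = result.get_causal_direction_counts(n_directions=5, min_causal_effect=0.01)
#   probs = result.get_probabilities(min_causal_effect=0.01)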
import random
import cv2
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
class custom_aug:
def __init__(self) -> None:
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
def sometimes(aug): return iaa.Sometimes(0.5, aug)
self.main_seq = iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
                iaa.Flipud(0.15),  # vertically flip 15% of all images
                # pad images by 0% to 15% of their height/width
sometimes(iaa.CropAndPad(
percent=(0, 0.15),
pad_mode=ia.ALL,
pad_cval=(0, 255)
)),
                sometimes(iaa.Affine(
                    # scale images to 50-130% (x) / 50-150% (y) of their size, individually per axis
                    scale={"x": (0.5, 1.3), "y": (0.5, 1.5)},
                    # translate by -10 to +10 pixels (per axis)
                    translate_px={"x": (-10, 10), "y": (-10, 10)},
                    # translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                    rotate=(-30, 30),  # rotate by -30 to +30 degrees
                    shear=(-25, 25),  # shear by -25 to +25 degrees
# use nearest neighbour or bilinear interpolation (fast)
order=[0, 1],
# if mode is constant, use a cval between 0 and 255
cval=(0, 255),
# use any of scikit-image's warping modes (see 2nd image from the top for examples)
mode=ia.ALL
)),
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
iaa.SomeOf((0, 5),
[
# convert images into their superpixel representation
#sometimes(iaa.Superpixels(
# p_replace=(0, 1.0), n_segments=(20, 100))),
iaa.OneOf([
                        # blur images with a sigma between 0.9 and 1.0
                        iaa.GaussianBlur((0.9, 1.0)),
# blur image using local means with kernel sizes between 2 and 7
#iaa.AverageBlur(k=(1, 3)),
# blur image using local medians with kernel sizes between 2 and 7
#iaa.MedianBlur(k=(1, 3)),
]),
#iaa.Emboss(alpha=(0.4, 0.8), strength=(
# 0.4, 0.8)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.1, .5)),
iaa.DirectedEdgeDetect(alpha=(0.1, .50), direction=(0.90, 1.0)),
])),
# add gaussian noise to images
iaa.AdditiveGaussianNoise(loc=0, scale=(
0.0, 0.05*255), per_channel=0.5),
# invert color channels
#iaa.Invert(1, per_channel=True),
                    # change brightness of images (add -10 to +10 to pixel values)
                    iaa.Add((-10, 10), per_channel=0.5),
# change hue and saturation
iaa.AddToHueAndSaturation((0, 1)),
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
#iaa.Multiply((0.8, 1.1), per_channel=0.5),
iaa.FrequencyNoiseAlpha(
exponent=(-2, 0),
first=iaa.Multiply((0.6, 1.), per_channel=True),
#second=iaa.LinearContrast((0.6, 1.0))
)
]),
# improve or worsen the contrast
#iaa.LinearContrast((0.1, .60), per_channel=0.5),
#iaa.Grayscale(alpha=(0.1, .5)),
# move pixels locally around (with random strengths)
#sometimes(iaa.ElasticTransformation(
# alpha=(0.7, 1), sigma=0.1)),
# sometimes move parts of the image around
#sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
#sometimes(iaa.PerspectiveTransform(scale=(0.1, 0.5)))
],
random_order=True
)
],
random_order=True
)
def to_imgaug_format(self, bboxes: list, names: list, shape:list) -> BoundingBoxesOnImage:
bbox_on_image = []
for n, bbox in zip(names, bboxes):
x1, y1, x2, y2 = bbox
bbox_on_image.append(BoundingBox(x1, y1, x2, y2, n))
bbs = BoundingBoxesOnImage(bbox_on_image, shape=shape)
return bbs
def to_numpy(self, bbs)->np.ndarray:
res = []
_name = []
for each in bbs.bounding_boxes:
res.append([each.x1, each.y1, each.x2, each.y2])
_name.append(each.label)
        if res == []:
            # all boxes were removed by the augmentation; fall back to a dummy box
            res = [[0., 0., 0., 0.]]
            _name = [0.]
return np.asarray(res), _name
def __call__(self, image: np.ndarray, bboxes:list, names:list)->list:
bbs = self.to_imgaug_format(bboxes, names, image.shape)
images_aug, bbs_aug = self.main_seq(image=image, bounding_boxes=bbs)
clipped_bbs = bbs_aug.remove_out_of_image().clip_out_of_image()
bbox, lb = self.to_numpy(clipped_bbs)
return images_aug, bbox, lb
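# Example usage (a minimal sketch; the blank image and box coordinates are dummies):
#
#   aug = custom_aug()
#   image = np.zeros((256, 256, 3), dtype=np.uint8)
#   img_aug, boxes_aug, labels = aug(image, [[10, 10, 50, 50]], ["face"])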
class RandomShear(object):
    def __init__(self, shear_factor=0.2):
        self.shear_factor = shear_factor
        if isinstance(self.shear_factor, tuple):
            assert len(self.shear_factor) == 2, "Invalid range for shear factor"
        else:
            self.shear_factor = (-self.shear_factor, self.shear_factor)
def __call__(self, img, bboxes):
shear_factor = random.uniform(*self.shear_factor)
w, h = img.shape[1], img.shape[0]
if shear_factor < 0:
img, bboxes = HorizontalFlip()(img, bboxes)
M = np.array([[1, abs(shear_factor), 0], [0, 1, 0]])
nW = img.shape[1] + abs(shear_factor*img.shape[0])
bboxes[:, [0, 2]] += ((bboxes[:, [1, 3]]) *
abs(shear_factor)).astype(int)
img = cv2.warpAffine(img, M, (int(nW), img.shape[0]))
if shear_factor < 0:
img, bboxes = HorizontalFlip()(img, bboxes)
img = cv2.resize(img, (w, h))
scale_factor_x = nW / w
bboxes[:, :4] /= [scale_factor_x, 1, scale_factor_x, 1]
return img, bboxes
class Shear(object):
def __init__(self, shear_factor=0.2):
self.shear_factor = shear_factor
def __call__(self, img, bboxes):
        h, w = img.shape[:2]
shear_factor = self.shear_factor
if shear_factor < 0:
img, bboxes = HorizontalFlip()(img, bboxes)
M = np.array([[1, abs(shear_factor), 0], [0, 1, 0]])
nW = img.shape[1] + int(abs(shear_factor*img.shape[0]))
bboxes[:, [0, 2]] += ((bboxes[:, [1, 3]]) *
abs(shear_factor)).astype(int)
img = cv2.warpAffine(img, M, (w, img.shape[0]))
if shear_factor < 0:
img, bboxes = HorizontalFlip()(img, bboxes)
print(f'(shear)--> new {bboxes}')
bboxes = clip_box(bboxes, [0, 0, w, h], 0)
print(f'--> clipped {bboxes}')
return img, bboxes
class HorizontalFlip(object):
def __init__(self):
pass
def __call__(self, img, bboxes):
img_center = np.array(img.shape[:2])[::-1]/2
img_center = np.hstack((img_center, img_center))
img = img[:, ::-1, :]
bboxes[:, [0, 2]] += 2*(img_center[[0, 2]] - bboxes[:, [0, 2]])
box_w = abs(bboxes[:, 0] - bboxes[:, 2])
bboxes[:, 0] -= box_w
bboxes[:, 2] += box_w
return img, bboxes
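# Example (illustrative; bboxes must be a float ndarray of [x1, y1, x2, y2] rows):
#
#   img = np.zeros((100, 200, 3), dtype=np.uint8)
#   boxes = np.array([[20., 30., 60., 80.]])
#   img_f, boxes_f = HorizontalFlip()(img.copy(), boxes.copy())
#   # with image width 200, x-coordinates 20/60 mirror about x=100 to 140/180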
def bbox_area(bbox):
return (bbox[:, 2] - bbox[:, 0])*(bbox[:, 3] - bbox[:, 1])
def clip_box(bbox, clip_box, alpha):
ar_ = (bbox_area(bbox))
x_min = np.maximum(bbox[:, 0], clip_box[0]).reshape(-1, 1)
y_min = np.maximum(bbox[:, 1], clip_box[1]).reshape(-1, 1)
    x_max = np.minimum(bbox[:, 2], clip_box[2]).reshape(-1, 1)
    y_max = np.minimum(bbox[:, 3], clip_box[3]).reshape(-1, 1)
    bbox = np.hstack((x_min, y_min, x_max, y_max, bbox[:, 4:]))
    # keep boxes whose clipped area retains at least alpha of the original area
    # (completion of the truncated helper; follows the standard clip_box pattern)
    delta_area = (ar_ - bbox_area(bbox)) / ar_
    bbox = bbox[delta_area < (1 - alpha), :]
    return bbox
import argparse
import time
import math
import os
import os.path
import numpy as np
from tqdm import tqdm
import gc
import sys
import pdb
from glob import glob
from sklearn.utils import shuffle
from joblib import Parallel, delayed
import multiprocessing
from PIL import Image,ImageStat
from openslide import OpenSlide, ImageSlide, OpenSlideUnsupportedFormatError
import pyvips
import random
import torch.utils.data.distributed
import horovod.torch as hvd
import cv2
import torch.multiprocessing as mp
import pprint
import torch
import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image
import torchvision.datasets as vdsets
from torchsummary import summary
from lib.resflow import ACT_FNS, ResidualFlow
import lib.datasets as datasets
import lib.optimizers as optim
import lib.utils as utils
from lib.GMM import GMM_model as gmm
import lib.image_transforms as imgtf
import lib.layers as layers
import lib.layers.base as base_layers
from lib.lr_scheduler import CosineAnnealingWarmRestarts
"""
- Implement in training
- Deploy
"""
# Arguments
parser = argparse.ArgumentParser(description='Residual Flow Model Color Information', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--data', type=str, default='custom', choices=[
'custom'
]
)
# mnist
parser.add_argument('--dataroot', type=str, default='data')
## GMM ##
parser.add_argument('--nclusters', type=int, default=4,help='The amount of tissue classes trained upon')
parser.add_argument('--dataset', type=str, default="0", help='Which dataset to use. "16" for CAMELYON16 or "17" for CAMELYON17')
parser.add_argument('--slide_path', type=str, help='Folder of where the training data whole slide images are located', default=None)
parser.add_argument('--mask_path', type=str, help='Folder of where the training data whole slide images masks are located', default=None)
parser.add_argument('--valid_slide_path', type=str, help='Folder of where the validation data whole slide images are located', default=None)
parser.add_argument('--valid_mask_path', type=str, help='Folder of where the validation data whole slide images masks are located', default=None)
parser.add_argument('--slide_format', type=str, help='In which format the whole slide images are saved.', default='tif')
parser.add_argument('--mask_format', type=str, help='In which format the masks are saved.', default='tif')
parser.add_argument('--bb_downsample', type=int, help='Level to use for the bounding box construction as downsampling level of whole slide image', default=7)
parser.add_argument('--log_image_path', type=str, help='Path of savepath of downsampled image with processed rectangles on it.', default='.')
parser.add_argument('--epoch_steps', type=int, help='The hard-coded number of iterations in one epoch.', default=1000)
# Not used now
#parser.add_argument('--batch_tumor_ratio', type=float, help='The ratio of the batch that contains tumor', default=1)
parser.add_argument('--val_split', type=float, default=0.15)
parser.add_argument('--debug', action='store_true', help='If running in debug mode')
parser.add_argument('--fp16_allreduce', action='store_true', help='If all reduce in fp16')
##
parser.add_argument('--imagesize', type=int, default=32)
# 28
parser.add_argument('--nbits', type=int, default=8) # Only used for celebahq.
parser.add_argument('--block', type=str, choices=['resblock', 'coupling'], default='resblock')
parser.add_argument('--coeff', type=float, default=0.98)
parser.add_argument('--vnorms', type=str, default='2222')
parser.add_argument('--n-lipschitz-iters', type=int, default=None)
parser.add_argument('--sn-tol', type=float, default=1e-3)
parser.add_argument('--learn-p', type=eval, choices=[True, False], default=False,help='Learn Lipschitz norms, see paper')
parser.add_argument('--n-power-series', type=int, default=None, help='Amount of power series evaluated, see paper')
parser.add_argument('--factor-out', type=eval, choices=[True, False], default=False,help='Factorize dimensions, see paper')
parser.add_argument('--n-dist', choices=['geometric', 'poisson'], default='poisson')
parser.add_argument('--n-samples', type=int, default=1)
parser.add_argument('--n-exact-terms', type=int, default=2,help='Exact terms computed in series estimation, see paper')
parser.add_argument('--var-reduc-lr', type=float, default=0)
parser.add_argument('--neumann-grad', type=eval, choices=[True, False], default=True,help='Neumann gradients, see paper')
parser.add_argument('--mem-eff', type=eval, choices=[True, False], default=True,help='Memory efficient backprop, see paper')
parser.add_argument('--act', type=str, choices=ACT_FNS.keys(), default='swish')
parser.add_argument('--idim', type=int, default=128)
parser.add_argument('--nblocks', type=str, default='16-16-16')
parser.add_argument('--squeeze-first', type=eval, default=False, choices=[True, False])
parser.add_argument('--actnorm', type=eval, default=True, choices=[True, False])
parser.add_argument('--fc-actnorm', type=eval, default=False, choices=[True, False])
parser.add_argument('--batchnorm', type=eval, default=True, choices=[True, False])
parser.add_argument('--dropout', type=float, default=0.)
parser.add_argument('--fc', type=eval, default=False, choices=[True, False])
parser.add_argument('--kernels', type=str, default='3-1-3')
parser.add_argument('--add-noise', type=eval, choices=[True, False], default=False)
parser.add_argument('--quadratic', type=eval, choices=[True, False], default=False)
parser.add_argument('--fc-end', type=eval, choices=[True, False], default=False)
parser.add_argument('--fc-idim', type=int, default=8)
parser.add_argument('--preact', type=eval, choices=[True, False], default=True)
parser.add_argument('--padding', type=int, default=0)
parser.add_argument('--first-resblock', type=eval, choices=[True, False], default=False)
parser.add_argument('--cdim', type=int, default=128)
parser.add_argument('--optimizer', type=str, choices=['adam', 'adamax', 'rmsprop', 'sgd'], default='adam')
parser.add_argument('--scheduler', type=eval, choices=[True, False], default=False)
parser.add_argument('--nepochs', help='Number of epochs for training', type=int, default=1000)
parser.add_argument('--batchsize', help='Minibatch size', type=int, default=64)
parser.add_argument('--val-batchsize', help='Minibatch size for validation', type=int, default=200)
parser.add_argument('--lr', help='Learning rate', type=float, default=1e-3)
parser.add_argument('--wd', help='Weight decay', type=float, default=0)
# 0
parser.add_argument('--warmup-iters', type=int, default=0)
parser.add_argument('--annealing-iters', type=int, default=0)
parser.add_argument('--save', help='directory to save results', type=str, default='experiment1')
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--ema-val', type=eval, help='Use exponential moving averages of parameters at validation.', choices=[True, False], default=False)
parser.add_argument('--update-freq', type=int, default=1)
parser.add_argument('--task', type=str, choices=['density', 'classification', 'hybrid','gmm'], default='gmm')
parser.add_argument('--scale-dim', type=eval, choices=[True, False], default=False)
parser.add_argument('--rcrop-pad-mode', type=str, choices=['constant', 'reflect'], default='reflect')
parser.add_argument('--padding-dist', type=str, choices=['uniform', 'gaussian'], default='uniform')
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--save_conv', type=eval,help='Save converted images.', default=False)
parser.add_argument('--begin-epoch', type=int, default=0)
parser.add_argument('--nworkers', type=int, default=8)
parser.add_argument('--print-freq', help='Print progress every so many iterations', type=int, default=1)
parser.add_argument('--vis-freq', help='Visualize progress every so many iterations', type=int, default=5)
parser.add_argument('--save-every', help='Save model every so many epochs', type=int, default=1)
args = parser.parse_args()
# Random seed
if args.seed is None:
args.seed = np.random.randint(100000)
# Assert for now
assert args.batchsize == args.val_batchsize, "Training and Validation batch size must match"
# Horovod: initialize library.
hvd.init()
print(f"hvd.size {hvd.size()} hvd.rank {hvd.rank()} hvd.local_rank {hvd.local_rank()}")
def rank00():
if hvd.rank() == 0 and hvd.local_rank() == 0:
return True
if rank00():
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if rank00():
logger.info(args)
if device.type == 'cuda':
if rank00():
logger.info(f'Found {hvd.size()} CUDA devices.')
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
if rank00():
for i in range(torch.cuda.device_count()):
props = torch.cuda.get_device_properties(i)
logger.info('{} \t Memory: {:.2f}GB'.format(props.name, props.total_memory / (1024**3)))
else:
logger.info('WARNING: Using device {}'.format(device))
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if device.type == 'cuda':
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {'num_workers': 1, 'pin_memory': True} if device.type == 'cuda' else {}
def geometric_logprob(ns, p):
return torch.log(1 - p + 1e-10) * (ns - 1) + torch.log(p + 1e-10)
def standard_normal_sample(size):
return torch.randn(size)
def standard_normal_logprob(z):
logZ = -0.5 * math.log(2 * math.pi)
return logZ - z.pow(2) / 2
def normal_logprob(z, mean, log_std):
mean = mean + torch.tensor(0.)
log_std = log_std + torch.tensor(0.)
c = torch.tensor([math.log(2 * math.pi)]).to(z)
inv_sigma = torch.exp(-log_std)
tmp = (z - mean) * inv_sigma
return -0.5 * (tmp * tmp + 2 * log_std + c)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def rescale(tensor):
"""
Parameters
----------
tensor : Pytorch tensor
Tensor to be rescaled to [0,1] interval.
Returns
-------
Rescaled tensor.
"""
tensor -= tensor.min()
tensor /= tensor.max()
return tensor
def reduce_bits(x):
if args.nbits < 8:
x = x * 255
x = torch.floor(x / 2**(8 - args.nbits))
x = x / 2**args.nbits
return x
def add_noise(x, nvals=256):
"""
[0, 1] -> [0, nvals] -> add noise -> [0, 1]
"""
if args.add_noise:
noise = x.new().resize_as_(x).uniform_()
x = x * (nvals - 1) + noise
x = x / nvals
return x
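# Worked example (illustrative): with nvals=256, a pixel value of 0.5 becomes
# 0.5 * 255 + U[0, 1) in [127.5, 128.5), and dividing by 256 maps it back to
# roughly [0.498, 0.502] -- i.e. uniform dequantization noise of width 1/nvals.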
def update_lr(optimizer, itr):
iter_frac = min(float(itr + 1) / max(args.warmup_iters, 1), 1.0)
lr = args.lr * iter_frac
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def add_padding(x, nvals=256):
# Theoretically, padding should've been added before the add_noise preprocessing.
# nvals takes into account the preprocessing before padding is added.
if args.padding > 0:
if args.padding_dist == 'uniform':
u = x.new_empty(x.shape[0], args.padding, x.shape[2], x.shape[3]).uniform_()
logpu = torch.zeros_like(u).sum([1, 2, 3]).view(-1, 1)
return torch.cat([x, u / nvals], dim=1), logpu
elif args.padding_dist == 'gaussian':
u = x.new_empty(x.shape[0], args.padding, x.shape[2], x.shape[3]).normal_(nvals / 2, nvals / 8)
logpu = normal_logprob(u, nvals / 2, math.log(nvals / 8)).sum([1, 2, 3]).view(-1, 1)
return torch.cat([x, u / nvals], dim=1), logpu
else:
raise ValueError()
else:
return x, torch.zeros(x.shape[0], 1).to(x)
def remove_padding(x):
if args.padding > 0:
return x[:, :im_dim, :, :]
else:
return x
def open_img(path):
return np.asarray(Image.open(path))[:, :, 0] / 255
def get_valid_idx(mask_list):
""" Get the valid indices of masks by opening images in parallel """
num_cores = multiprocessing.cpu_count()
data = Parallel(n_jobs=num_cores)(delayed(open_img)(i) for i in mask_list)
return data
if rank00():
logger.info('Loading dataset {}'.format(args.data))
class make_dataset(torch.utils.data.Dataset):
"""Make Pytorch dataset."""
def __init__(self, args,train=True):
"""
Args:
"""
self.train = train
if args.mask_path:
self.train_paths = shuffle(list(zip(sorted(glob(os.path.join(args.slide_path,f'*.{args.slide_format}'))),
sorted(glob(os.path.join(args.mask_path,f'*.{args.mask_format}'))))))
else:
self.train_paths = shuffle(sorted(glob(os.path.join(args.slide_path,f'*.{args.slide_format}'))))
print(f"Found {len(self.train_paths)} images")
if args.valid_slide_path:
            if args.mask_path:
self.valid_paths = shuffle(list(zip(sorted(glob(os.path.join(args.valid_slide_path,f'*.{args.slide_format}'))),
sorted(glob(os.path.join(args.valid_mask_path,f'*.{args.mask_format}'))))))
else:
self.valid_paths = shuffle(sorted(glob(os.path.join(args.valid_slide_path,f'*.{args.slide_format}'))))
else:
            val_split = int(len(self.train_paths) * args.val_split)
            self.valid_paths = self.train_paths[:val_split]
            self.train_paths = self.train_paths[val_split:]
self.contours_train = []
self.contours_valid = []
self.contours_tumor = []
self.level_used = args.bb_downsample
self.mag_factor = pow(2, self.level_used)
self.patch_size = args.imagesize
# self.tumor_ratio = args.batch_tumor_ratio
self.log_image_path = args.log_image_path
self.slide_format = args.slide_format
@staticmethod
def _transform(image,train=True):
if train:
return transforms.Compose([
# transforms.ToPILImage(),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
reduce_bits,
lambda x: add_noise(x, nvals=2**args.nbits),
])(image)
else:
return transforms.Compose([
transforms.ToTensor(),
reduce_bits,
lambda x: add_noise(x, nvals=2**args.nbits),
])(image)
def __len__(self):
return len(self.train_paths)
def get_bb(self):
hsv = cv2.cvtColor(self.rgb_image, cv2.COLOR_BGR2HSV)
lower_red = np.array([20, 20, 20])
upper_red = np.array([255, 255, 255])
mask = cv2.inRange(hsv, lower_red, upper_red)
# (50, 50)
        close_kernel = np.ones((50, 50), dtype=np.uint8)
import numpy as np
from collections import namedtuple
import json
import copy
# Defining the neural network model
ModelParam = namedtuple('ModelParam',
['input_size', 'output_size', 'layers', 'activation', 'noise_bias', 'output_noise'])
model_params = {}
model_test1 = ModelParam(
input_size=9,
output_size=1,
layers=[45, 5],
activation=['sigmoid'],
noise_bias=0.0,
output_noise=[False, False, True],
)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(x, 0)
def passthru(x):
return x
# useful for discrete actions
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
# useful for discrete actions
def sample(p):
return np.argmax(np.random.multinomial(1, p))
activate = {'sigmoid': sigmoid, 'relu': relu, 'tanh': np.tanh, 'softmax': softmax, 'passthru': passthru}
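# Example (illustrative): softmax(np.array([1., 2., 3.])) returns probabilities
# summing to 1 (about [0.09, 0.245, 0.665]), and sample() then draws a discrete
# action index from them with a single multinomial draw.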
class Model(object):
''' simple feedforward model '''
def __init__(self, model_params=None):
if model_params is not None:
# Requirement for mfa to initialize
# self.output_noise = model_params.output_noise
self.layers = model_params.layers
self.input_size = model_params.input_size
self.output_size = model_params.output_size
self.activation = model_params.activation
# secondary requirement
self.rnn_mode = False # in the future will be useful
self.time_input = 0 # use extra sinusoid input
self.sigma_bias = model_params.noise_bias # bias in stdev of output
self.noise_bias = model_params.noise_bias
self.sigma_factor = 0.5 # multiplicative in stdev of output
self.output_noise = model_params.output_noise
self.shapes = []
self.sample_output = False
self.render_mode = False
self.weight = []
self.bias = []
self.param_count = 0
self.initialize()
def initialize(self):
# Setting shapes for weight matrix
self.shapes = []
for _layer in range(len(self.layers)):
if _layer == 0:
self.shapes = [(self.input_size, self.layers[_layer])]
else:
self.shapes.append((self.layers[_layer - 1], self.layers[_layer]))
self.shapes.append((self.layers[_layer], self.output_size))
# setting activations for the model
if len(self.activation) > 1:
self.activations = [activate[x] for x in self.activation]
elif self.activation[0] == 'relu':
self.activations = [relu, relu, passthru]
elif self.activation[0] == 'sigmoid':
self.activations = [np.tanh, np.tanh, sigmoid]
elif self.activation[0] == 'softmax':
self.activations = [np.tanh, np.tanh, softmax]
self.sample_output = True
elif self.activation[0] == 'passthru':
self.activations = [np.tanh, np.tanh, passthru]
else:
self.activations = [np.tanh, np.tanh, np.tanh]
self.weight = []
self.bias = []
# self.bias_log_std = []
# self.bias_std = []
self.param_count = 0
idx = 0
for shape in self.shapes:
self.weight.append(np.zeros(shape=shape))
self.bias.append(np.zeros(shape=shape[1]))
            self.param_count += (np.prod(shape) + shape[1])
# if self.output_noise[idx]:
# self.param_count += shape[1]
# log_std = np.zeros(shape=shape[1])
# self.bias_log_std.append(log_std)
# out_std = np.exp(self.sigma_factor*log_std + self.sigma_bias)
# self.bias_std.append(out_std)
# idx += 1
def get_action(self, X, mean_mode=False):
# if mean_mode = True, ignore sampling.
h = np.array(X).flatten()
num_layers = len(self.weight)
for i in range(num_layers):
w = self.weight[i]
b = self.bias[i]
h = np.matmul(h, w) + b
# if (self.output_noise[i] and (not mean_mode)):
# out_size = self.shapes[i][1]
# out_std = self.bias_std[i]
# output_noise = np.random.randn(out_size)*out_std
# h += output_noise
h = self.activations[i](h)
if self.sample_output:
h = sample(h)
return h
def get_action_once(self, X, mean_mode=False):
# if mean_mode = True, ignore sampling.
h = X
num_layers = len(self.weight)
for i in range(num_layers):
w = self.weight[i]
b = self.bias[i]
h = np.dot(h, w) + b
# if (self.output_noise[i] and (not mean_mode)):
# out_size = self.shapes[i][1]
# out_std = self.bias_std[i]
# output_noise = np.random.randn(out_size)*out_std
# h += output_noise
h = self.activations[i](h)
if self.sample_output:
h = sample(h)
return h
def set_model_params(self, gene):
pointer = 0
for i in range(len(self.shapes)):
w_shape = self.shapes[i]
b_shape = self.shapes[i][1]
            s_w = np.prod(w_shape)
s = s_w + b_shape
chunk = np.array(gene[pointer:pointer + s])
self.weight[i] = chunk[:s_w].reshape(w_shape)
self.bias[i] = chunk[s_w:].reshape(b_shape)
# if self.output_noise[i]:
# s = b_shape
# self.bias_log_std[i] = np.array(gene[pointer:pointer+s])
# self.bias_std[i] = np.exp(self.sigma_factor*self.bias_log_std[i] + self.sigma_bias)
if self.render_mode:
print("bias_std, layer", i, self.bias_std[i])
pointer += s
def load_model(self, filename):
with open(filename) as f:
datastore = json.load(f)
model_param = ModelParam(
input_size=datastore['input_size'],
output_size=datastore['output_size'],
layers=datastore['layers'],
activation=datastore['activation'],
noise_bias=datastore['noise_bias'],
output_noise=datastore['output_noise'],
)
model_ = Model(model_param)
model_.set_model_params(datastore['gene'])
print('Loading model from file: {}'.format(filename))
return model_
def save_model(self, filename=None):
modelstore = copy.deepcopy(self.__dict__)
datastore = {}
for key in modelstore.keys():
if key in ['input_size', 'output_size', 'layers', 'activation',
'noise_bias', 'output_noise']:
datastore[key] = modelstore[key]
datastore['gene'] = self.get_gene()
if filename is not None:
with open(filename, 'w') as f:
json.dump(datastore, f)
print('Saving model to file: {}'.format(filename))
else:
print('Please define filename to store model object!')
def get_random_model_params(self, stdev=0.1):
return np.random.randn(self.param_count) * stdev
def get_gene(self):
gene = []
for i in range(len(self.weight)):
w = self.weight[i].reshape(-1).tolist()
            b = self.bias[i].reshape(-1).tolist()
gene.extend(w)
gene.extend(b)
assert len(gene) == self.param_count
return gene
def __repr__(self):
modelstore = copy.deepcopy(self.__dict__)
s = 'Model Characteristics'
for key in modelstore.keys():
if key in ['input_size', 'output_size', 'layers', 'activation',
'noise_bias', 'output_noise']:
s = '{} \n{}: {}'.format(s,key,modelstore[key])
return s
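# Example usage (a minimal sketch driven by the model_test1 parameters above):
#
#   m = Model(model_test1)
#   m.set_model_params(m.get_random_model_params(stdev=0.1))
#   action = m.get_action(np.zeros(9))  # one observation of length input_size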
class ModelPopulation(object):
def __init__(self):
self.model_param = None
self.model = None
self.genes = None
self.fitness = None
self.scores = None
self.scorers = None
# right now this is just index number passed to list
# todo: make a dist of measures and use it to take vlue from it
self.fitness_measure = None
self.size = None
def initialize(self):
if self.model_param is not None:
self.model = Model(self.model_param)
def save_population(self, model, solutions, fitness, measure, scores, scorers, filename=''):
modelstore = copy.deepcopy(model.__dict__)
datastore = {}
for key in modelstore.keys():
if key in ['input_size', 'output_size', 'layers', 'activation',
'noise_bias', 'output_noise']:
datastore[key] = modelstore[key]
datastore['genes'] = solutions
datastore['fitness'] = fitness
datastore['scores'] = scores
datastore['scorers'] = scorers
datastore['fitness_measure'] = measure
datastore['size'] = len(solutions)
# for key in datastore.keys():
# print('{} has type {}'.format(key,type(datastore[key])))
if filename is not None:
with open(filename, 'w') as f:
json.dump(datastore, f)
print('Saving model population to file: {}'.format(filename))
else:
print('Please define filename to store model object!')
def load_population(self, filename, silent=True):
with open(filename) as f:
datastore = json.load(f)
model_param = ModelParam(
input_size=datastore['input_size'],
output_size=datastore['output_size'],
layers=datastore['layers'],
activation=datastore['activation'],
noise_bias=datastore['noise_bias'],
output_noise=datastore['output_noise'],
)
self.model_param = model_param
self.model = Model(model_param)
self.genes = datastore['genes']
self.fitness = datastore['fitness']
self.scores = datastore['scores']
self.scorers = datastore['scorers']
self.size = datastore['size']
self.fitness_measure = datastore['fitness_measure']
if not silent:
print('Loading model population from file: {}'.format(filename))
fitment = self.fitness
print('Population Size: {} Fitness Measure: {} Best Fitness: {}'.format(len(self.genes),
self.scorers,
fitment[np.argmax(fitment)]))
return self
# todo: put logic in place
def select_best(self, k=1, fill_population=False):
if self.genes is not None:
if k == 1:
# todo: put logic for k right now returns the best in population
fitment = self.fitness
gene = self.genes[np.argmax(fitment)]
model = self.model
model.set_model_params(gene)
return model
else:
                if 0 < k < 1 and isinstance(k, float):
                    p = np.percentile(self.fitness, q=int((1 - k) * 100))
                    ind = np.where(self.fitness > p)
else:
                    ind = np.argpartition(self.fitness, -k)
import numpy as np
import math
from collections import namedtuple
import torch
import datetime
from tensorboardX import SummaryWriter
class model_summary_writer(object):
def __init__(self, summary_name , env):
now=datetime.datetime.now()
self.summary = SummaryWriter(logdir = 'log/'+ summary_name +'_{}'.format(now.strftime('%Y%m%d_%H%M%S')))
self.summary_cnt = 0
self.env = env
class WeightClipper(object):
def __init__(self, frequency=5):
self.frequency = frequency
def __call__(self, module):
# filter the variables to get the ones you want
        if hasattr(module, 'weight'):
            w = module.weight.data
            w.clamp_(-1, 1)  # clamp the weights in place to [-1, 1]
def get_gae(rewards, learner_len, values, gamma, lamda):
# rewards = learner_rewards[1]
# learner_len=learner_len[1]
# values = learner_values[1]
# gamma = args.gamma
# lamda = args.lamda
rewards = torch.Tensor(rewards)
returns = torch.zeros_like(rewards)
advants = -1 * torch.ones_like(rewards)
masks = torch.ones_like(rewards)
masks[(learner_len-1):] = 0
running_returns = 0
previous_value = 0
running_advants = 0
for t in reversed(range(0, learner_len )):
running_returns = rewards[t] + gamma * running_returns * masks[t]
running_tderror = rewards[t] + gamma * previous_value * masks[t] - values.data[t]
running_advants = running_tderror + gamma * lamda * running_advants * masks[t]
returns[t] = running_returns
previous_value = values.data[t]
advants[t] = running_advants
advants[:learner_len] = (advants[:learner_len] - advants[:learner_len].mean()) / advants[:learner_len].std()
return returns, advants
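# Worked example (illustrative): for rewards = [1, 1], learner_len = 2 and
# gamma = 0.99, the mask zeros out bootstrapping past the episode end, so
# returns[1] = 1 and returns[0] = 1 + 0.99 * 1 = 1.99; the advantages are the
# GAE-smoothed TD errors, normalized to zero mean / unit std over those steps.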
def hard_update(target, source):
"""
Copies the parameters from source network to target network
:param target: Target network (PyTorch)
:param source: Source network (PyTorch)
:return:
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def trajs_to_tensor(exp_trajs):
np_trajs = []
for episode in exp_trajs:
for i in range(1,len(episode)+1):
np_trajs.append([[x.cur_state for x in episode[:i]], episode[i-1].action])
expert_len = np.array([len(x[0]) for x in np_trajs])
maxlen = np.max(expert_len)
expert_observations = -np.ones(shape = (len(np_trajs) , maxlen))
expert_actions = np.array([x[1] for x in np_trajs])
expert_len = []
for i in range(len(np_trajs)):
temp = np_trajs[i][0]
expert_observations[i,:len(temp)] = temp
expert_len.append(len(temp))
return expert_observations, expert_actions, expert_len
def arr_to_tensor(find_state, device, exp_obs, exp_act, exp_len):
exp_states = find_state(exp_obs)
exp_obs = torch.LongTensor(exp_states)
exp_act = torch.LongTensor(exp_act)
exp_len = torch.LongTensor(exp_len)
# exp_len , sorted_idx = exp_len.sort(0,descending = True)
# exp_obs = exp_obs[sorted_idx]
# exp_act = exp_act[sorted_idx]
return exp_obs, exp_act, exp_len
Step = namedtuple('Step','cur_state action next_state reward done')
def check_RouteID(episode,routes):
state_seq = [str(x.cur_state) for x in episode] + [str(episode[-1].next_state)]
episode_route = "-".join(state_seq)
if episode_route in routes:
idx = routes.index(episode_route)
else:
idx = -1
return idx
def normalize(vals):
"""
normalize to (0, max_val)
input:
vals: 1d array
"""
min_val = np.min(vals)
max_val = np.max(vals)
return (vals - min_val) / (max_val - min_val)
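# Example (illustrative): normalize(np.array([2., 4., 6.])) -> array([0., 0.5, 1.])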
def sigmoid(xs):
"""
sigmoid function
inputs:
xs 1d array
"""
return [1 / (1 + math.exp(-x)) for x in xs]
def identify_routes(trajs):
num_trajs = len(trajs)
route_dict = {}
for i in range(num_trajs):
episode = trajs[i]
route = "-".join([str(x.cur_state) for x in episode] + [str(episode[-1].next_state)])
if route in route_dict.keys():
route_dict[route] += 1
else:
route_dict[route] = 1
out_list = []
for key in route_dict.keys():
route_len = len(key.split("-"))
out_list.append((key, route_len, route_dict[key]))
out_list = sorted(out_list, key=lambda x : x[2] , reverse = True)
return out_list
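# Example (illustrative): two identical trajectories visiting states 0 -> 1 -> 2
# yield [("0-1-2", 3, 2)] -- the route string, its length in states, and its count.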
def expert_compute_state_visitation_freq(sw,trajs):
feat_exp = np.zeros([sw.n_states])
for episode in trajs:
for step in episode:
feat_exp[sw.pos2idx(step.cur_state)] += 1
feat_exp[sw.pos2idx(step.next_state)] += 1
feat_exp = feat_exp/len(trajs)
return feat_exp
def expert_compute_state_action_visitation_freq(sw, trajs):
N_STATES = sw.n_states
N_ACTIONS = sw.max_actions
mu = np.zeros([N_STATES,N_ACTIONS])
for episode in trajs:
for step in episode:
cur_state= step.cur_state
s = sw.pos2idx(cur_state)
action_list = sw.get_action_list(cur_state)
action = step.action
a = action_list.index(action)
mu[s,a] +=1
mu = mu/len(trajs)
return mu
def compute_state_visitation_freq(sw, gamma, trajs, policy, deterministic=True):
"""compute the expected states visition frequency p(s| theta, T)
using dynamic programming
inputs:
P_a NxNxN_ACTIONS matrix - transition dynamics
gamma float - discount factor
trajs list of list of Steps - collected from expert
policy Nx1 vector (or NxN_ACTIONS if deterministic=False) - policy
returns:
p Nx1 vector - state visitation frequencies
"""
N_STATES = sw.n_states
# N_ACTIONS = sw.max_actions
T = len(trajs[0])+1
# mu[s, t] is the prob of visiting state s at time t
mu = np.zeros([N_STATES, T])
for traj in trajs:
mu[sw.pos2idx(traj[0].cur_state), 0] += 1
mu[:,0] = mu[:,0]/len(trajs)
for t in range(T-1):
for s in range(N_STATES):
if deterministic:
mu[s, t+1] = sum([mu[pre_s, t]*sw.is_connected(sw.idx2pos(pre_s) , np.argmax(policy[pre_s]) , sw.idx2pos(s)) for pre_s in range(N_STATES)])
# mu[s, t+1] = sum([mu[pre_s, t]*P_a[pre_s, s, np.argmax(policy[pre_s])] for pre_s in range(N_STATES)])
else:
mu_temp = 0
for pre_s in range(N_STATES):
action_list = sw.get_action_list(sw.idx2pos(pre_s))
for a1 in range(len(action_list)):
mu_temp += mu[pre_s, t]*sw.is_connected(sw.idx2pos(pre_s) , action_list[a1] , sw.idx2pos(s)) *policy[pre_s, a1]
mu[s, t+1] = mu_temp
# mu[s, t+1] = sum([sum([mu[pre_s, t]*P_a[pre_s, s, a1]*policy[pre_s, a1] for a1 in range(N_ACTIONS)]) for pre_s in range(N_STATES)])
p = np.sum(mu, 1)
return p
def compute_state_action_visitation_freq(sw, gamma, trajs, policy, deterministic=True):
"""compute the expected states visition frequency p(s| theta, T)
using dynamic programming
inputs:
P_a NxNxN_ACTIONS matrix - transition dynamics
gamma float - discount factor
trajs list of list of Steps - collected from expert
policy Nx1 vector (or NxN_ACTIONS if deterministic=False) - policy
returns:
p Nx1 vector - state visitation frequencies
"""
N_STATES = sw.n_states
N_ACTIONS = sw.max_actions
route_list = identify_routes(trajs)
max_route_length = max([x[1] for x in route_list])
T = max_route_length
    mu = np.zeros([N_STATES, N_ACTIONS, T])
import numpy as np
from scipy import optimize, interpolate
import pandas as pd
import matplotlib.pyplot as plt
def transform(p, x, y):
    # Shift one curve by the offset p and interpolate it onto the other's x-grid
if np.max(x[:,0]) >= np.max(y[:, 0]):
trs = interpolate.interp1d(p[0]+x[:,0], p[1]+x[:,1], bounds_error=False, fill_value=0)
res = trs(y[:,0])
else:
trs = interpolate.interp1d(-p[0]+y[:,0], -p[1]+y[:,1], bounds_error=False, fill_value=0)
res = trs(x[:,0])
return res
def read_files(origin_xlsx, formed_xlsx):
#TODO: Read the xlsx files of origin tests of all rates and the formed sheet test at lowest rate
origin = pd.ExcelFile(origin_xlsx)
formed = pd.ExcelFile(formed_xlsx)
origin_names = sorted(origin.sheet_names, key=lambda x: float(x))
#curves = pd.read_excel("220.xlsx").values
#print(origin_names)
original_all = {}
for name in origin_names:
original_all[name] = (pd.read_excel(origin, name).values)
#original = pd.read_excel(origin_xlsx, origin_names[0]).values
# #print(original)
formed = pd.read_excel(formed, 0).values # formed_copy = formed.copy()
# #print(formed)
# #original = np.array(list(filter(lambda x: not np.isnan(x[1]), original)))
# #neck = np.argmax(original[:,-1])
# #original = original[:neck,:]
# #original = np.array(list(filter(lambda x: x[0]>0.002, original)))
# #print(original.shape)
return original_all, formed
def transform_low_rate(original, formed):
neck_original= np.argmax(original[:,-1])
original_neck = original[neck_original, 0]
#original_copy = original.copy()
    formed = np.array(list(filter(lambda x: not np.isnan(x[1]), formed)))
import scipy.signal as signal
import copy
import numpy as np
import ray
import os
import imageio
from Env_Builder import *
from Map_Generator2 import maze_generator
from parameters import *
# helper functions
def discount(x, gamma):
return signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
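# Worked example (illustrative): discount(np.array([1., 1., 1.]), 0.9) returns
# [2.71, 1.9, 1.0] -- each entry is its reward plus gamma times the next entry.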
class Worker():
def __init__(self, metaAgentID, workerID, workers_per_metaAgent, env, localNetwork, sess, groupLock, learningAgent, global_step):
self.metaAgentID = metaAgentID
self.agentID = workerID
self.name = "worker_" + str(workerID)
self.num_workers = workers_per_metaAgent
self.global_step = global_step
self.nextGIF = 0
self.env = env
self.local_AC = localNetwork
self.groupLock = groupLock
self.learningAgent = learningAgent
self.sess = sess
self.loss_metrics = None
self.perf_metrics = None
self.allGradients = []
def __del__(self):
if NN_DEBUG_MODE:
print('((worker)__del__)meta{0}worker{1}'.format(self.metaAgentID, self.agentID))
def calculateImitationGradient(self, rollout, episode_count): # todo: check rollout
rollout = np.array(rollout, dtype=object)
# we calculate the loss differently for imitation
# if imitation=True the rollout is assumed to have different dimensions:
# [o[0],o[1],optimal_actions]
target_meangoal = rollout[:, 2]
target_block = rollout[:, 6]
rewards = rollout[:, 7]
advantages = rollout[:, 8]
# rnn_state = self.local_AC.state_init
# s1Value = self.sess.run(self.local_AC.value,
# feed_dict={self.local_AC.inputs : np.stack(rollout[:, 0]),
# self.local_AC.goal_pos : np.stack(rollout[:, 1]),
# self.local_AC.state_in[0]: rnn_state[0],
# self.local_AC.state_in[1]: rnn_state[1]})[0, 0]
#
# v = self.sess.run([self.local_AC.value,
# ],
# # todo: feed the message(last time step) here
# feed_dict={self.local_AC.inputs: np.stack(rollout[:, 0]), # state
# self.local_AC.goal_pos: np.stack(rollout[:, 1]), # goal vector
# self.local_AC.state_in[0]: rnn_state[0],
# self.local_AC.state_in[1]: rnn_state[1],
# })
# values = v[0,0]
# self.rewards_plus = np.asarray(rewards.tolist() + [s1Value])
# discounted_rewards = discount(self.rewards_plus, gamma)[:-1]
# self.value_plus = np.asarray(values.tolist() + [s1Value])
# advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
# advantages = discount(advantages, gamma)
temp_actions = np.stack(rollout[:, 3])
rnn_state = self.local_AC.state_init
feed_dict = {self.global_step : episode_count,
self.local_AC.inputs : np.stack(rollout[:, 0]),
self.local_AC.goal_pos : np.stack(rollout[:, 1]),
self.local_AC.optimal_actions: np.stack(rollout[:, 3]),
self.local_AC.state_in[0] : rnn_state[0],
self.local_AC.state_in[1] : rnn_state[1],
self.local_AC.train_imitation: (rollout[:, 4]),
self.local_AC.target_v : np.stack(temp_actions),
self.local_AC.train_value : temp_actions,
self.local_AC.advantages : advantages,
self.local_AC.target_meangoals : np.stack(target_meangoal),
self.local_AC.target_blockings : np.stack(target_block),
}
# print('feed ', feed_dict)
v_l, i_l, local_vars, i_grads = self.sess.run([self.local_AC.value_loss,
self.local_AC.imitation_loss,
self.local_AC.local_vars,
self.local_AC.i_grads
],
feed_dict=feed_dict)
if NN_DEBUG_MODE:
print('v_l', v_l)
print('i_l', i_l)
# print('local_vars', local_vars)
print('l_v', local_vars)
# print('igrads', i_grads)
# raise(TypeError)
return [i_l], i_grads
def calculateGradient(self, rollout, bootstrap_value, episode_count, rnn_state0):
# ([s,a,r,s1,v[0,0]])
rollout = np.array(rollout, dtype=object) # todo: meangoal, blocking
inputs = rollout[:, 0]
goals = rollout[:, 6]
target_meangoal = rollout[:, 7]
target_block = rollout[:, 8]
# meangoal = rollout[:, -5]
# blocking = rollout[:, -4]
# message = rollout[:, -3]
actions = rollout[:, 1]
rewards = rollout[:, 2]
values = rollout[:, 4]
valids = rollout[:, 5]
train_value = rollout[:, -2]
train_policy = rollout[:, -1]
# Here we take the rewards and values from the rollout, and use them to
# generate the advantage and discounted returns. (With bootstrapping)
# The advantage function uses "Generalized Advantage Estimation"
self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = discount(self.rewards_plus, gamma)[:-1]
self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
advantages = discount(advantages, gamma)
num_samples = min(EPISODE_SAMPLES, len(advantages))
sampleInd = np.sort(np.random.choice(advantages.shape[0], size=(num_samples,), replace=False))
feed_dict = {
self.global_step : episode_count,
self.local_AC.target_v : np.stack(discounted_rewards),
self.local_AC.inputs : np.stack(inputs),
self.local_AC.goal_pos : np.stack(goals),
self.local_AC.actions : actions,
self.local_AC.target_meangoals : np.stack(target_meangoal),
self.local_AC.target_blockings : np.stack(target_block),
# self.local_AC.block : block,
# self.local_AC.message : message,
self.local_AC.train_valid: np.stack(valids),
self.local_AC.advantages : advantages,
self.local_AC.train_value: train_value,
self.local_AC.state_in[0]: rnn_state0[0],
self.local_AC.state_in[1]: rnn_state0[1],
# self.local_AC.train_policy: train_policy,
self.local_AC.train_valids: np.vstack(train_policy)
}
v_l, p_l, valid_l, e_l, g_n, v_n, blocking_l, meangoal_l, message_l, grads = self.sess.run([self.local_AC.value_loss,
self.local_AC.policy_loss,
self.local_AC.valid_loss,
self.local_AC.entropy,
self.local_AC.grad_norms,
self.local_AC.var_norms,
self.local_AC.blocking_loss,
self.local_AC.mean_goal_loss,
self.local_AC.message_loss,
self.local_AC.grads],
feed_dict=feed_dict)
return [v_l, p_l, valid_l, e_l, blocking_l, meangoal_l, message_l, g_n, v_n], grads
def imitation_learning_only(self, episode_count):
self.env._reset()
rollouts, targets_done = self.parse_path(episode_count)
# rollouts.append([])
if rollouts is None:
return None, 0
gradients = []
losses = []
for i in range(self.num_workers):
train_buffer = rollouts[i]
imitation_loss, grads = self.calculateImitationGradient(train_buffer, episode_count)
gradients.append(grads)
losses.append(imitation_loss)
return gradients, losses
def run_episode_multithreaded(self, episode_count, coord):
if NN_DEBUG_MODE:
print('(Worker-RL)Begin to run! meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
if self.metaAgentID < NUM_IL_META_AGENTS:
assert(1==0)
# print("THIS CODE SHOULD NOT TRIGGER")
# self.is_imitation = True
# self.imitation_learning_only()
global episode_lengths, episode_mean_values, episode_invalid_ops, episode_stop_ops, episode_rewards, episode_finishes
# print('episode_mean_values', episode_lengths)
num_agents = self.num_workers
with self.sess.as_default(), self.sess.graph.as_default():
while self.shouldRun(coord, episode_count):
episode_buffer, episode_values = [], []
episode_reward = episode_step_count = episode_inv_count = targets_done = episode_stop_count = 0
self.synchronize()
# Initial state from the environment
if self.agentID == 1:
if NN_DEBUG_MODE:
print('(Worker-RL)self.env._reset(a) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
self.env._reset()
if NN_DEBUG_MODE:
print('(Worker-RL)self.env._reset(b) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
joint_observations[self.metaAgentID] = self.env._observe()
if NN_DEBUG_MODE:
print('(Worker-RL)self.synchronize(1a) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
self.synchronize() # synchronize starting time of the threads
if NN_DEBUG_MODE:
print('(Worker-RL)self.synchronize(1b) meta:{0}, worker{1}'.format(self.metaAgentID, self.agentID))
# Get Information For Each Agent
validActions = self.env.listValidActions(self.agentID, joint_observations[self.metaAgentID][self.agentID])
s = joint_observations[self.metaAgentID][self.agentID]
rnn_state = self.local_AC.state_init
rnn_state0 = rnn_state
self.synchronize() # synchronize starting time of the threads
swarm_reward[self.metaAgentID] = 0
swarm_targets[self.metaAgentID] = 0
episode_rewards[self.metaAgentID] = []
episode_finishes[self.metaAgentID] = []
episode_lengths[self.metaAgentID] = []
episode_mean_values[self.metaAgentID] = []
episode_invalid_ops[self.metaAgentID] = []
episode_stop_ops[self.metaAgentID] = []
# ===============================start training =======================================================================
# RL
if True:
# prepare to save GIF
saveGIF = False
global GIFS_FREQUENCY_RL
if OUTPUT_GIFS and self.agentID == 1 and ((not TRAINING) or (episode_count >= self.nextGIF)):
saveGIF = True
self.nextGIF = episode_count + GIFS_FREQUENCY_RL
GIF_episode = int(episode_count)
GIF_frames = [self.env._render()]
# start RL
self.env.finished = False
agent_done = False
while not self.env.finished:
if not agent_done:
# todo: add multi-output here
a_dist, v, rnn_state, \
blocking, meangoal, message = self.sess.run([self.local_AC.policy,
self.local_AC.value,
self.local_AC.state_out,
self.local_AC.blocking,
self.local_AC.mean_goal,
self.local_AC.message,
],
# todo: feed the message(last time step) here
feed_dict={self.local_AC.inputs : [s[0]], # state
self.local_AC.goal_pos : [s[1]], # goal vector
self.local_AC.state_in[0]: rnn_state[0],
self.local_AC.state_in[1]: rnn_state[1],
})
skipping_state = False
train_policy = train_val = 1
if not skipping_state and not agent_done:
if not (np.argmax(a_dist.flatten()) in validActions):
episode_inv_count += 1
train_val = 0
train_valid = np.zeros(a_size)
train_valid[validActions] = 1
valid_dist = np.array([a_dist[0, validActions]])
# coding=utf-8
from enum import Enum, IntEnum, auto
import time
import random
import math
import numpy
import cv2
from utils import readimage, writeimage
import matching
class Phases(IntEnum):
"""任务执行的阶段"""
BEGIN = 0
RUNNING = 1
END = 2
class Results(Enum):
"""任务执行的结果"""
PASS = auto()
SUCCESS = auto()
FAIL = auto()
tasks = []
def getTasks():
"""获取任务列表"""
return tasks
def registerTask(task, name, desc):
"""注册任务(已弃用,请使用装饰器注册任务)"""
task.name = name
task.description = desc
tasks.append(task)
return
def Task(name, desc):
"""用于自动注册任务的装饰器"""
def decorator(cls):
registerTask(cls, name, desc)
return cls
return decorator
class TaskBase(object):
"""自动挂机任务的基类"""
name = ""
description = ""
def __init__(self):
"""初始化"""
return
def init(self):
self.image = None
return
def begin(self, player, t):
"""开始任务"""
return Results.FAIL
def run(self, player, t):
"""执行任务"""
return Results.FAIL
def end(self, player, t):
"""结束任务"""
return Results.FAIL
def getImageCache(self):
"""获取用于预览的图像,如果不想显示请返回None"""
return self.image
@Task("自动客潮", "请将界面停留在餐厅")
class TaskKeChao(TaskBase):
"""客潮自动化"""
def __init__(self):
super().__init__()
self.templateButton = readimage("kechao_btn")
self.templateDish = readimage("kechao_dish_part")
self.templateTitle = readimage("kechao_title_part")
return
def init(self):
super().init()
self.lastTime = 0
# 0: restaurant screen  1: customer-rush dialog  2: customer rush in progress  3: end-of-rush settlement screen
self.step = 0
self.pointCache = []
return
def begin(self, player, t):
"""需要玩家位于餐厅界面"""
self.image = player.screenshot()
if self.step == 0:
points = findcircle(self.image, 25)
for x, y in points:
if x > (self.image.shape[1] * 0.9) and y < (self.image.shape[0] * 0.2):
# Found the customer-rush button
player.clickaround(x, y)
self.lastTime = t
self.step = 1
return Results.PASS
if t - self.lastTime > 3:
# Customer-rush button not found and timed out
print("Customer-rush button not found; please make sure you are on the restaurant screen")
return Results.FAIL
elif self.step == 1 or self.step == 2:
if t - self.lastTime > 2:
points = findtemplate(self.image, self.templateButton)
for x, y in points:
# Found the start button
if self.step == 2:
print("Clicking the button did not start anything; you may be out of customer-rush attempts")
return Results.FAIL
player.clickaround(x, y)
self.lastTime = t
self.step = 2
return Results.PASS
# Start button not found
if self.step == 2:
# The start button was already clicked; entering the customer rush
self.lastTime = t
print("Entering customer rush")
return Results.SUCCESS
# Start button not clicked yet; fall back to step 0
self.lastTime = t
self.step = 0
return Results.PASS
return Results.PASS
def run(self, player, t):
"""客潮挂机中"""
self.image = player.screenshot()
# Process the cache of clicked points
self.pointCache = [(x, y, time - 1) for x, y, time in self.pointCache if time > 1]
# Detect circles to find dishes (an older version used template matching, which worked poorly)
points = findcircle(self.image, 25)
points2 = []
for x, y in points:
if x > (self.image.shape[1] * 0.9): continue
if y > (self.image.shape[0] * 0.8):
# Customer rush ended; back to the restaurant
self.lastTime = t
self.step = 3
print("客潮结束")
return Results.SUCCESS
cv2.circle(self.image, (x, y), 25, (0, 0, 255), 3)
if not self.containpoint(x, y):
points2.append((x, y))
if len(points2) > 0:
x, y = random.choice(points2)
player.clickaround(x, y)
self.pointCache.append((x, y, 10))
self.lastTime = t
return Results.PASS
if t - self.lastTime > 15:
# Nobody is ordering; stop idling?
print("No customer has ordered anything for over 15 seconds; stopping")
return Results.FAIL
return Results.PASS
def end(self, player, t):
"""客潮结束"""
self.image = player.screenshot()
if self.step == 3:
if t - self.lastTime > 2:
points = findtemplate(self.image, self.templateTitle)
for x, y in points:
# Currently on the customer-rush settlement screen
filename = "KeChao_" + time.strftime("%Y-%m-%d-%H-%M-%S")
writeimage(filename, self.image)
print("已将客潮结算界面截图保存至: saved/%s.png" % filename)
player.clickaround(x, y)
self.lastTime = t
return Results.PASS
# Settlement done
self.lastTime = t
return Results.SUCCESS
return Results.PASS
def containpoint(self, x, y):
for cx, cy, time in self.pointCache:
if math.sqrt(math.pow(int(x) - int(cx), 2) + math.pow(int(y) - int(cy), 2)) < 5:
return True
return False
class TaskMiniGame(TaskBase):
"""活动小游戏挂机任务的基类"""
def __init__(self):
super().__init__()
self.templateButton = readimage("minigame_btn")
return
def init(self):
self.lastTime = 0
# Whether the start button has been clicked
self.started = False
return
def begin(self, player, t):
"""需要玩家位于小游戏界面"""
self.image = player.screenshot()
if not self.started:
points = findtemplate(self.image, self.templateButton)
for x, y in points:
cv2.circle(self.image, (x, y), 40, (0, 0, 255), 2)
player.click(x, y)
self.lastTime = t
self.started = True
return Results.PASS
if t - self.lastTime > 3:
# Start button not found and timed out
print("Start button not found; please make sure you are on the mini-game screen")
return Results.FAIL
elif t - self.lastTime > 1:
self.lastTime = t
return Results.SUCCESS
return Results.PASS
try:
from constant import *
except:
# **Edit the values below to suit your setup before using this**
# Interval between eliminations (seconds)
TIME_INTERVAL = 0.5
# Distance from the left edge of the screen to the play area
MARGIN_LEFT = 0
# Distance from the top edge of the screen to the play area
MARGIN_TOP = 0
# Number of tiles horizontally
HORIZONTAL_NUM = 10
# Number of tiles vertically
VERTICAL_NUM = 10
# Tile width
SQUARE_WIDTH = 100
# Tile height
SQUARE_HEIGHT = 100
# Top-left and bottom-right coordinates used when cropping each tile
SUB_LT_X = 20
SUB_LT_Y = 20
SUB_RB_X = 80
SUB_RB_Y = 80
@Task("自动小游戏-千人千面", "需自行修改代码进行配置")
class TaskQianRenQianMian(TaskMiniGame):
"""千人千面自动连连看"""
def init(self):
super().init()
self.result = None
self.pair = None
return
def run(self, player, t):
"""小游戏挂机中"""
self.image = player.screenshot()
for j in range(VERTICAL_NUM):
for i in range(HORIZONTAL_NUM):
x = MARGIN_LEFT + i * SQUARE_WIDTH
y = MARGIN_TOP + j * SQUARE_HEIGHT
cv2.rectangle(self.image, (x, y), (x + SQUARE_WIDTH, y + SQUARE_HEIGHT), (0, 255, 0), 1)
if self.result is None:
# Slice the image into tiles and store them in an array
squares = []
for j in range(VERTICAL_NUM):
for i in range(HORIZONTAL_NUM):
x = MARGIN_LEFT + i * SQUARE_WIDTH
y = MARGIN_TOP + j * SQUARE_HEIGHT
square = self.image[y : y + SQUARE_HEIGHT, x : x + SQUARE_WIDTH]
# Shrink each tile inward a bit so inconsistent edges do not interfere
square = square[SUB_LT_Y : SUB_RB_Y, SUB_LT_X : SUB_RB_X]
squares.append(square)
# Group identical tiles as one type in the array
types = []
for square in squares:
if self.isbackground(square):
continue
if not self.isimageexist(square, types):
types.append(square)
# Convert the array of cropped tile images into the corresponding numeric matrix
self.result = []
num = 0
for j in range(VERTICAL_NUM):
line = []
for i in range(HORIZONTAL_NUM):
if self.isbackground(squares[num]):
line.append(0)
else:
for t in range(len(types)):
if isimagesame(squares[num], types[t]):
line.append(t + 1)
break
num += 1
self.result.append(line)
return Results.PASS
# Perform automatic elimination
if t - self.lastTime >= TIME_INTERVAL:
self.lastTime = t
# Second selection
if self.pair is not None:
player.click(self.pair[0] + SQUARE_WIDTH / 2, self.pair[1] + SQUARE_HEIGHT / 2)
self.pair = None
return Results.PASS
# Locate the first selected tile
for i in range(len(self.result)):
for j in range(len(self.result[0])):
if self.result[i][j] != 0:
# Locate the second selected tile
for m in range(len(self.result)):
for n in range(len(self.result[0])):
if self.result[m][n] != 0:
if matching.canConnect(i, j, m, n, self.result):
# Run the elimination and make the first selection
self.result[i][j] = 0
self.result[m][n] = 0
x1 = MARGIN_LEFT + j * SQUARE_WIDTH
y1 = MARGIN_TOP + i * SQUARE_HEIGHT
x2 = MARGIN_LEFT + n * SQUARE_WIDTH
y2 = MARGIN_TOP + m * SQUARE_HEIGHT
player.click(x1 + SQUARE_WIDTH / 2, y1 + SQUARE_HEIGHT / 2)
self.pair = (x2, y2)
return Results.PASS
# TODO Only treat the run as finished once the end screen appears; otherwise wait a while and re-plan
print("Automatic elimination finished")
return Results.SUCCESS
return Results.PASS
def isbackground(self, img):
# TODO Is there a better algorithm?
# Note that OpenCV channel order is BGR, not RGB...
return abs(img[:, :, 0].mean() - 54) <= 10 and abs(img[:, :, 1].mean() - 70) <= 20 and abs(img[:, :, 2].mean() - 105) <= 15
def isimageexist(self, img, img_list):
for existed_img in img_list:
if isimagesame(img, existed_img):
return True
return False
@Task("自动小游戏", "更多自动小游戏敬请期待...")
class TaskMoreMiniGames(TaskBase):
"""算了放弃了, 毁灭吧赶紧的"""
def begin(self, player, t):
print("我不想做了, 如果您需要的话可以自行编写挂机任务, 然后提交pr")
return Results.FAIL
def isimagesame(img1, img2, threshold = 0.5):
# TODO Is there a better algorithm?
# b = numpy.subtract(existed_img, img)
# return not numpy.any(b)
result = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
location = numpy.where(result >= threshold)
# Plausible completion (assumed): treat any location above threshold as a match
return len(location[0]) > 0
"""
Utils
=====
"""
import numpy as np
from acoustics.decibel import dbsum
SOUNDSPEED = 343.0
"""
Speed of sound in air.
"""
esum = dbsum
def mean_tl(tl, surfaces):
"""Mean tl."""
try:
tau_axis = tl.ndim - 1
except AttributeError:
tau_axis = 0
tau = 1.0 / (10.0**(tl / 10.0))
return 10.0 * np.log10(1.0 / np.average(tau, tau_axis, surfaces))
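# Example (illustrative): a 10 m^2 wall with TL = 40 dB plus a 2 m^2 door with
# TL = 20 dB yield a surface-weighted composite of about 27.6 dB:
# mean_tl(np.array([40.0, 20.0]), np.array([10.0, 2.0])) -> ~27.57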
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# Copyright (c) 2015, <NAME> and <NAME>.
# License: GNU-GPL Style.
# How to cite GBpy:
# Banadaki, <NAME>. & <NAME>. "An efficient algorithm for computing the primitive
# bases of a general lattice plane",
# Journal of Applied Crystallography 48, 585-588 (2015). doi:10.1107/S1600576715004446
import numpy as np
from . import integer_manipulations as int_man
from . import misorient_fz as mis_fz
from . import tools as trans
import numpy.linalg as nla
def proper_ptgrp(cryst_ptgrp):
"""
Returns the proper point group corresponding to a crystallographic point
group
Parameters
----------------
cryst_ptgrp: str
Crystallographic point group in Schoenflies notation
Returns
----------
proper_ptgrp: str
Proper point group in Schoenflies notation
"""
if cryst_ptgrp in ['D3', 'D3d']:
proper_ptgrp = 'D3'
if cryst_ptgrp in ['D4', 'D4h']:
proper_ptgrp = 'D4'
if cryst_ptgrp in ['D6', 'D6h']:
proper_ptgrp = 'D6'
if cryst_ptgrp in ['O', 'Oh']:
proper_ptgrp = 'O'
# prop_grps = ['C1', 'C2', 'C3', 'C4', 'C6', 'D2', 'D3', 'D4', 'D6',
# 'T', 'O']
# laue_grps = ['Ci', 'C2h', 'C3i', 'C4h', 'C6h', 'D2h', 'D3d', 'D4h', 'D6h',
# 'Th', 'Oh']
# if cryst_ptgrp in laue_grps:
# proper_ptgrp =
# elif cryst_ptgrp in prop_grps:
# proper_ptgrp = cryst_ptgrp
return proper_ptgrp
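# Example (illustrative): proper_ptgrp('Oh') -> 'O', proper_ptgrp('D3d') -> 'D3';
# inputs outside the four families handled above are assumed not to occur here.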
def largest_odd_factor(var_arr):
"""
Function that computes the largest odd factors of an array of integers
Parameters
-----------------
var_arr: numpy.array
Array of integers whose largest odd factors needs to be computed
Returns
------------
odd_d: numpy.array
Array of largest odd factors of each integer in var_arr
"""
if var_arr.ndim == 1:
odd_d = np.empty(np.shape(var_arr))
odd_d[:] = np.NaN
ind1 = np.where((np.remainder(var_arr, 2) != 0) | (var_arr == 0))[0]
if np.size(ind1) != 0:
odd_d[ind1] = var_arr[ind1]
ind2 = np.where((np.remainder(var_arr, 2) == 0) & (var_arr != 0))[0]
if np.size(ind2) != 0:
odd_d[ind2] = largest_odd_factor(var_arr[ind2] / 2.0)
return odd_d
else:
raise Exception('Wrong Input Type')
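# Example (illustrative): largest_odd_factor(np.array([12, 7, 0]))
# returns array([3., 7., 0.]) since 12 = 4 * 3, 7 is already odd, and 0 maps to 0.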
def compute_inp_params(lattice, sig_type):
# Leila: for the tolerance value for D6 I chose 1e-2
# to get the values of mu and nu in Table 2 of Grimmer's paper.
"""
tau and kmax necessary for possible integer quadruple combinations
are computed
Parameters
----------------
lattice: class
Attributes of the underlying lattice class
sig_type: {'common', 'specific'}
Returns
-----------
tau: float
tau is a rational number :math:`= \\frac{\\nu}{\\mu}`
tau is equal to (a/c)^2
kmax: float
kmax is an integer that depends on :math:`\\mu \\ , \\nu`
for hcp: kmax equals F/\Sigma. kmax is always a divisor of 12\\mu\\nu.
F/\Sigma is a divisor of 6\\mu\\nu if \\nu is even and a divisor of 3\\mu\\nu
if \\nu is a multiple of 4.
"""
lat_params = lattice.lat_params
cryst_ptgrp = proper_ptgrp(lattice.cryst_ptgrp)
if cryst_ptgrp == 'D3':
c_alpha = np.cos(lat_params['alpha'])
tau = c_alpha / (1 + 2 * c_alpha)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-8)
rho = mu - 3 * nu
kmax = 4 * mu * rho
elif sig_type == 'common':
kmax = []
if cryst_ptgrp == 'D4':
tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-8)
kmax = 4 * mu * nu
if sig_type == 'common':
kmax = []
if cryst_ptgrp == 'D6':
tau = (lat_params['a'] ** 2) / (lat_params['c'] ** 2)
if sig_type == 'specific':
[nu, mu] = int_man.rat_approx(tau, 1e-2)
if np.remainder(nu, 2) == 0:
if np.remainder(nu, 4) == 0:
kmax = 3 * mu * nu
else:
kmax = 6 * mu * nu
else:
kmax = 12 * mu * nu
if sig_type == 'common':
kmax = []
if cryst_ptgrp == 'O':
tau = 1
kmax = []
return tau, kmax
def mesh_muvw(cryst_ptgrp, sigma, sig_type, *args):
# Leila note, deleted the star and lines 208-210
# mu = args[0]['mu']
# nu = args[0]['nu']
# kmax = args[0]['kmax']
#delete lines 228-235
# uncomment lines 236-245
"""
Compute max allowed values of [m,U,V,W] and generates an array
of integer quadruples
Parameters
----------------
cryst_ptgrp: str
Proper point group in Schoenflies notation
sigma: int
Sigma number
sig_type: {'common', 'specific'}
args[0]: dic
keys: 'nu', 'mu', 'kmax'
Returns
-----------
Integer quadruple numpy array
"""
if sig_type == 'common':
if cryst_ptgrp == 'D3':
tu1 = np.ceil(2 * np.sqrt(sigma))
m_max = tu1
u_max = tu1
v_max = tu1
w_max = tu1
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [-v_max, v_max]
wlims = [0, w_max]
if cryst_ptgrp == 'D6':
tu1 = np.ceil(np.sqrt(sigma / 3.0))
tu2 = np.ceil(np.sqrt(sigma))
m_max = tu1
u_max = tu2
v_max = tu2
w_max = tu2
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
if cryst_ptgrp == 'D4' or cryst_ptgrp == 'O':
t1 = np.ceil(np.sqrt(sigma))
m_max = t1
u_max = t1
v_max = t1
w_max = t1
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
elif sig_type == 'specific':
mu = args[0]['mu']
nu = args[0]['nu']
kmax = args[0]['kmax']
if cryst_ptgrp == 'D3':
t1 = np.ceil(np.sqrt(sigma * kmax / (mu)))
t2 = np.ceil(np.sqrt(sigma * kmax / (mu - 2 * nu)))
m_max = t1
u_max = t2
v_max = t2
w_max = t2
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [-v_max, v_max]
wlims = [-w_max, w_max]
if cryst_ptgrp == 'D6':
m_max = np.ceil(np.sqrt(sigma * kmax / (3.0 * mu)))
u_max = np.ceil(np.sqrt(sigma * kmax / (nu)))
v_max = np.ceil(np.sqrt(sigma * kmax / (nu)))
w_max = np.ceil(np.sqrt(sigma * kmax / (mu)))
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
if cryst_ptgrp == 'D4':
t1 = np.sqrt(sigma * kmax)
m_max = np.ceil(t1 / np.sqrt(mu))
u_max = np.ceil(t1 / np.sqrt(nu))
v_max = np.ceil(t1 / np.sqrt(nu))
w_max = np.ceil(t1 / np.sqrt(mu))
mlims = [0, m_max]
ulims = [0, u_max]
vlims = [0, v_max]
wlims = [0, w_max]
else:
raise Exception('sig_type: wrong input type')
m_var = np.arange(mlims[0], mlims[1] + 1, 1)
u_var = np.arange(ulims[0], ulims[1] + 1, 1)
v_var = np.arange(vlims[0], vlims[1] + 1, 1)
w_var = np.arange(wlims[0], wlims[1] + 1, 1)
[x1, x2, x3, x4] = np.meshgrid(m_var, u_var, v_var, w_var)
x1 = x1.ravel()
x2 = x2.ravel()
x3 = x3.ravel()
x4 = x4.ravel()
return np.vstack((x1, x2, x3, x4)).astype(dtype='int64')
def mesh_muvw_fz(quad_int, cryst_ptgrp, sig_type, *args):
"""
For given integer quadruples, the set belonging to the corresponding
fundamental zone are separated out and retruned.
Parameters
----------------
quad_int: numpy.array
Integer quadruples
cryst_ptgrp: str
Proper point group in Schoenflies notation
sig_type: {'common', 'specific'}
args[0]: dic
keys: 'nu', 'mu', 'kmax'
Returns
-----------
np.vstack((m, u, v, w)): numpy.array
Integer quadruple numpy array belonging to the fundamental zone
of the corresponding crystallographic point group
"""
m = quad_int[0, :]
u = quad_int[1, :]
v = quad_int[2, :]
w = quad_int[3, :]
if sig_type == 'specific':
if cryst_ptgrp == 'D3':
mu = args[0]['mu']
nu = args[0]['nu']
tau = float(nu)/float(mu)
cond0 = u + v + w >= 0
cond1 = (u >= w) & (v >= w)
condfin = cond0 & cond1
m = m[condfin]
u = u[condfin]
v = v[condfin]
w = w[condfin]
cond0 = 2*m >= np.sqrt(1 - 3*tau)*(u + w)
cond1 = m >= u + v + w
condfin = cond0 & cond1
m = m[condfin]
u = u[condfin]
v = v[condfin]
w = w[condfin]
if cryst_ptgrp == 'D4':
mu = args[0]['mu']
nu = args[0]['nu']
tau = float(nu)/float(mu)
cond0 = (u >= v)
cond1 = (m >= (np.sqrt(2) + 1) * w)
condfin = (cond0 & cond1)
m = m[condfin]
u = u[condfin]
v = v[condfin]
w = w[condfin]
cond0 = (m >= np.sqrt(tau) * u)
cond1 = (m >= np.sqrt(tau / 2) * (u + v))
condfin = (cond0 & cond1)
m = m[condfin]
u = u[condfin]
v = v[condfin]
w = w[condfin]
if cryst_ptgrp == 'D6':
# equation 14-18 grimmer paper
mu = args[0]['mu']
nu = args[0]['nu']
cond0 = (u >= 2 * v)
cond1 = (m >= (2 / np.sqrt(3) + 1) * w)
condfin = (cond0 & cond1)
m = m[condfin]
u = u[condfin]
v = v[condfin]
w = w[condfin]
condfin = (m >= (np.sqrt(nu / (4 * mu)) * u))
m = m[condfin]
u = u[condfin]
v = v[condfin]
w = w[condfin]
condfin = (m >= (np.sqrt(nu / (12 * mu)) * (u - 2 * v)))
m = m[condfin]
u = u[condfin]
v = v[condfin]
w = w[condfin]
return np.vstack((m, u, v, w))
def check_fsig_int(quad_int, cryst_ptgrp, sigma, *args):
"""
For specific sigma rotations, a function of m, U, V, W (fsig) is computed.
The ratio of fsig and sigma should be a divisor of kmax. This
condition is checked and those integer quadruples that satisfy
this condition are returned
Parameters
----------------
quad_int: numpy.array
Integer quadruples
cryst_ptgrp: str
Proper point group in Schoenflies notation
sigma: float
sigma number
args[0]: dic
keys: 'nu', 'mu', 'kmax'
Returns
-----------
quad_int: numpy.array
Integer quadruple array that satisfy the above mentioned condition
"""
mu = args[0]['mu']
nu = args[0]['nu']
kmax = args[0]['kmax']
m = quad_int[0, :]
u = quad_int[1, :]
v = quad_int[2, :]
w = quad_int[3, :]
sigma = float(sigma)
if cryst_ptgrp == 'D3':
# $\frac{F}{\Sigma}$ should be a divisor of kmax
# $\in (12\mu\nu, 6\mu\nu, 3\mu\nu)$
# Keep only those quadruples for which the above condition is met
fsig = ((mu * (m ** 2) + (mu - 2 * nu) * (u ** 2 + v ** 2 + w ** 2)
+ 2 * nu * (u * v + v * w + w * u)) / sigma)
cond1 = np.where(abs(fsig - np.round(fsig)) < 1e-06)[0]
cond2 = np.where(np.remainder(kmax, fsig[cond1]) == 0)[0]
quad_int = quad_int[:, cond1[cond2]]
if cryst_ptgrp == 'D4':
# $\frac{F}{\Sigma}$ should be a divisor of kmax
# $\in (12\mu\nu, 6\mu\nu, 3\mu\nu)$
# Keep only those quadruples for which the above condition is met
fsig = (mu * (m ** 2 + w ** 2) + nu * (u ** 2 + v ** 2)) / sigma
cond1 = np.where(abs(fsig - np.round(fsig)) < 1e-06)[0]
cond2 = np.where(np.remainder(kmax, fsig[cond1]) == 0)[0]
quad_int = quad_int[:, cond1[cond2]]
if cryst_ptgrp == 'D6':
# $\frac{F}{\Sigma}$ should be a divisor of kmax
# $\in (12\mu\nu, 6\mu\nu, 3\mu\nu)$
# Keep only those quadruples for which the above condition is met
fsig = ((mu * (3 * (m ** 2) + w ** 2) +
nu * (u ** 2 - u * v + v ** 2)) / sigma)
cond1 = np.where(abs(fsig - np.round(fsig)) < 1e-06)[0]
cond1 = cond1[cond1!=0]
cond2 = np.where(np.remainder(kmax, fsig[cond1]) == 0)[0]
quad_int = quad_int[:, cond1[cond2]]
return quad_int
def eliminate_idrots(quad_int):
"""
Eliminate the rotations that correspond to the identity matrix and return the
integer quadruples
Parameters
----------------
quad_int: numpy.array
Integer quadruple array.
Returns
-----------
quad_int: numpy.array
Integer quadruple array in which the rotations belonging to the identity
matrix have been eliminated.
"""
m = quad_int[0, :]
u = quad_int[1, :]
v = quad_int[2, :]
w = quad_int[3, :]
cond0 = (u == 0) & (v == 0) & (w == 0)
cond1 = (m == 0) & (v == 0) & (w == 0)
cond2 = (u == 0) & (m == 0) & (w == 0)
cond3 = (u == 0) & (v == 0) & (m == 0)
condfin = (cond0 | cond1 | cond2 | cond3)
quad_int = np.delete(quad_int, np.where(condfin), axis=1)
return quad_int
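# Example (illustrative): the first column below is (m, u, v, w) = (1, 0, 0, 0),
# i.e. the identity rotation, and is dropped, while the second column survives:
# eliminate_idrots(np.array([[1, 1], [0, 2], [0, 0], [0, 0]]))
# -> array([[1], [2], [0], [0]])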
def sigtype_muvw(quad_int, cryst_ptgrp, sig_type):
"""
The type of integer quadruples are different for common and specific sigma
rotations. For example, for D4 point group, common rotations satisfy the
condition u = 0 and v = 0 or m = 0 and w = 0. The specific rotations belong
to the complementary set. Depending on the sig_type (common, specific), the
appropriate set of the integer quadruples are returned.
Parameters
----------------
quad_int: numpy.array
Integer quadruples.
cryst_ptgrp: str
Proper point group in Schoenflies notation.
sig_type: {'common', 'specific'}
Returns
-----------
quad_int: numpy.array
Integer quadruple array that satisfy the above mentioned condition.
"""
m = quad_int[0, :]
u = quad_int[1, :]
v = quad_int[2, :]
w = quad_int[3, :]
if cryst_ptgrp == 'D3':
cond0 = (m == 0) & (u + v + w == 0)
cond1 = (u == v) & (v == w)
condfin = cond0 | cond1
if cryst_ptgrp == 'D4' or cryst_ptgrp == 'D6':
cond0 = (u == 0) & (v == 0)
cond1 = (m == 0) & (w == 0)
condfin = cond0 | cond1
if cryst_ptgrp == 'O':
cond0 = m >= u
cond1 = u >= v
cond2 = v >= w
condfin = cond0 & cond1 & cond2
if sig_type == 'specific':
condfin = ~condfin
return quad_int[:, condfin]
def eliminate_mults(quad_int):
"""
Divide all the integer quadruples by their corresponding greatest common
divisors and return the unique set of integer quadruples
Parameters
----------------
quad_int: numpy.array
Integer quadruples.
Returns
-----------
quad_int: numpy.array
Integer quadruple array that satisfy the above mentioned condition.
"""
quad_gcd = int_man.gcd_array(quad_int.astype(dtype='int64'), 'columns')
quad_gcd = np.tile(quad_gcd, (4, 1))
a = quad_int / quad_gcd
a = a.transpose()
b = np.ascontiguousarray(a).view(np.dtype((np.void,
a.dtype.itemsize * a.shape[1])))
quad_int = np.unique(b).view(a.dtype).reshape(-1, a.shape[1])
quad_int = quad_int.transpose()
return quad_int
def check_sigma(quad_int, sigma, cryst_ptgrp, sig_type, *args):
"""
The integer quadruples that correspond to a sigma rotation satisfy
certain conditions. These conditions are checked and all the
quadruples that do not meet these requirements are filtered
out. These conditions depend on the rotation type (common or
specific) and the lattice type (crystallographic point group and mu, nu)
Parameters
----------------
quad_int: numpy.array
Integer quadruples.
cryst_ptgrp: str
Proper point group in Schoenflies notation.
sig_type: {'common', 'specific'}
args[0]: dic
keys: 'nu', 'mu', 'kmax'
Returns
-----------
quad_int: numpy.array
Integer quadruple array that satisfy the above mentioned condition.
See Also
-----------
check_fsig_int
"""
m = quad_int[0, :]
u = quad_int[1, :]
v = quad_int[2, :]
w = quad_int[3, :]
tol = 1e-10
if sig_type == 'common':
if cryst_ptgrp == 'D3':
ind1 = np.where((u == v) & (v == w))[0]
cond1 = (( | np.remainder(m[ind1], 2) | numpy.remainder |
#!/usr/bin/env python
import numpy as np
import math
import rospy
import ros_numpy
import tf
import cv2
from tf.transformations import euler_from_quaternion
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image,LaserScan
class RotateServer(object):
''' Rotate scans and pano images into world coordinates '''
def __init__(self):
# Subscribe to panos and scans
self.pano_sub = rospy.Subscriber('theta/image/decompressed', Image, self.pano_received)
self.scan_sub = rospy.Subscriber('scan', LaserScan, self.scan_received)
self.scans = []
self.buffer_interval_sec = 0.1 # How many seconds between scans
self.buffer_size = 200 # Keep up to this many scans
# Need access to transforms to rotate scans and pano into world coordinates
self.tf_lis = tf.TransformListener()
# Publisher
self.pano_pub = rospy.Publisher('/theta/image/rotated', Image, queue_size=1)
self.scan_pub = rospy.Publisher('/scan/rotated', LaserScan, queue_size=1)
self.bridge = CvBridge()
rospy.loginfo('RotateServer launched')
rospy.spin()
def scan_received(self, scan):
''' Buffer scans for a period of time so they can be matched to pano '''
if not self.scans:
self.scans.append(scan)
else:
time_diff = (scan.header.stamp - self.scans[-1].header.stamp).to_sec()
if time_diff >= self.buffer_interval_sec:
self.scans.append(scan)
if len(self.scans) > self.buffer_size:
self.scans.pop(0)
def get_scan(self, stamp):
if not self.scans:
return None
ix = 0
lowest_diff = abs((self.scans[ix].header.stamp - stamp).to_sec())
for i,scan in enumerate(self.scans):
diff = abs((scan.header.stamp - stamp).to_sec())
if diff < lowest_diff:
lowest_diff = diff
ix = i
return self.scans[ix],lowest_diff
def pano_received(self, image):
scan,time_diff = self.get_scan(image.header.stamp)
if not scan:
rospy.logerr('RotateServer received pano image but no laser scan available. Please switch on the laser scanner!')
return
if time_diff > 10.0:
rospy.logerr('RotateServer received pano image but the laser scan is stale (timestamp diff %.1f secs). Is the laser scanner running?' % time_diff)
else:
rospy.logdebug('RotateServer received pano image and scan: timestamp diff %.1f secs' % time_diff)
try:
# Get transforms #TODO probably should give the theta camera a transform, rather than using base_footprint
pano_trans,pano_rot = self.tf_lis.lookupTransform('/map', '/base_footprint', rospy.Time(0))
scan_trans,scan_rot = self.tf_lis.lookupTransform('/map', '/hokuyo_laser_frame', rospy.Time(0)) # avoid extrapolation into past error
except Exception as e:
rospy.logerr('RotateServer could not get transform, dropping pano: %s' % str(e))
return
# Calculate heading, turning right is positive (z-up)
laser_heading_rad = euler_from_quaternion(scan_rot)[2]
pano_heading_rad = euler_from_quaternion(pano_rot)[2]
# Rotate the pano image
try:
cv_image = self.bridge.imgmsg_to_cv2(image)
x_axis = 1
roll_pixels = -int(pano_heading_rad / (math.pi * 2) * cv_image.shape[x_axis])
cv_image = np.roll(cv_image, roll_pixels, axis=x_axis)
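# Plausible continuation (assumed, not from the original): republish the
# rotated pano; the 'rgb8' encoding here is a guess.
# rotated = self.bridge.cv2_to_imgmsg(cv_image, encoding='rgb8')
# rotated.header = image.header
# self.pano_pub.publish(rotated)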
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import (
interact,
interactive,
IntSlider,
widget,
FloatText,
FloatSlider,
fixed,
)
########################################
# WIDGETS
########################################
def WidgetWaveRegime():
i = interact(
GPRWidgetWaveRegime,
sig=FloatSlider(
min=0.5,
max=5,
value=3,
step=0.25,
continuous_update=False,
description="$\sigma$ [mS/m]",
),
epsr=IntSlider(
min=1,
max=25,
value=4,
step=1,
continuous_update=False,
description="$\epsilon_r$",
),
fc=IntSlider(
min=50,
max=1000,
value=250,
step=25,
continuous_update=False,
description="$f_c$ [MHz]",
),
x1=FloatSlider(
min=-10,
max=10,
value=-4,
step=0.25,
continuous_update=False,
description="$x_1$ [m]",
),
d1=FloatSlider(
min=1,
max=15,
value=2,
step=0.25,
continuous_update=False,
description="$d_1$ [m]",
),
R1=FloatSlider(
min=0.1,
max=2,
value=0.1,
step=0.1,
continuous_update=False,
description="$R_1$ [m]",
),
x2=FloatSlider(
min=-10,
max=10,
value=4,
step=0.25,
continuous_update=False,
description="$x_2$ [m]",
),
d2=FloatSlider(
min=1,
max=15,
value=6,
step=0.25,
continuous_update=False,
description="$d_2$ [m]",
),
R2=FloatSlider(
min=0.1,
max=2,
value=0.1,
step=0.1,
continuous_update=False,
description="$R_2$ [m]",
),
)
return i
########################################
# FUNCTIONS
########################################
def GPRWidgetWaveRegime(sig, epsr, fc, x1, d1, R1, x2, d2, R2):
sig = 0.001 * sig # mS/m to S/m
fc = 1e6 * fc # MHz to Hz
# Compute Time and Offset Range
v = fcnComputeVelocity(epsr, sig, fc)
a = fcnComputeAlpha(epsr, sig, fc)
DOI = 3 / a
DOIt = 1e9 * (6 / a) / v # DOI equivalent time in ns
xmin, xmax, nx = -10.0, 10.0, 26
xrx = np.reshape(np.linspace(xmin, xmax, nx), (1, nx))
tmax = (8 / a) / v # 4 diffusion distances converted to time
nt = 501
t = np.reshape(np.linspace(0, tmax, nt), (nt, 1))
p = np.ones((1, nx))
q = np.ones((nt, 1))
T = np.kron(t, p)
XRX = np.kron(q, xrx)
Attn = np.exp(-a * v * T)
# Create Radargram Data
dx = (xmax - xmin) / (nx - 1)
xp = [x1, x2]
dp = [d1, d2]
R = [R1, R2]
for ii in range(0, 2):
tii = fcnComputePointTravelTime(xp[ii], dp[ii], R[ii], epsr, sig, fc, xrx)
Aii = (
0.6
* dx
* (
Attn * fcnGetRicker(fc, T - np.kron(tii, q))
+ 0.001 * np.random.normal(0, 1, (nt, nx))
)
/ Attn
)
XRX = XRX + Aii
# PLOTTING
FS = 18
dlim = 16
fig1 = plt.figure(figsize=(14, 6))
Ax1 = fig1.add_axes([0.03, 0, 0.44, 1])
ptArray = np.array([[xmin, 0], [xmax, 0.0], [xmax, dlim], [xmin, dlim]])
poly1 = plt.Polygon(
ptArray,
closed=True,
facecolor=((0.7, 0.7, 0.5)),
edgecolor=((0.2, 0.2, 0.2)),
lw=2.5,
)
Ax1.add_patch(poly1)
ptArray = np.array(
[[xmin, 0], [xmax, 0.0], [xmax, -0.25 * dlim], [xmin, -0.25 * dlim]]
)
poly2 = plt.Polygon(
ptArray,
closed=True,
facecolor=((0.8, 1, 1)),
edgecolor=((0.2, 0.2, 0.2)),
lw=2.5,
)
Ax1.add_patch(poly2)
Ax1.plot([xmin, xmax], [DOI, DOI], "r", ls="--", lw=2.5)
phi = np.linspace(0, 2 * np.pi, 31)
for ii in range(0, 2):
xs = xp[ii] + R[ii] * np.cos(phi)
ds = dp[ii] + R[ii] * np.sin(phi)
polyTemp = plt.Polygon(
np.c_[xs, ds],
closed=True,
facecolor=((0.5, 0.5, 0.5)),
edgecolor=((0.2, 0.2, 0.2)),
lw=3,
)
Ax1.add_patch(polyTemp)
Ax1.set_xlim(xmin, xmax)
Ax1.set_ylim(dlim, -0.25 * dlim)
Ax1.set_xticks(np.linspace(-10, 10, 11))
Ax1.set_yticks(np.linspace(-4, 16, 11))
plt.xticks(fontsize=FS)
plt.yticks(fontsize=FS)
plt.xlabel("X [m]", fontsize=FS + 4)
plt.ylabel("Depth [m]", fontsize=FS + 4)
Ax1.text(xmin + 0.2, DOI - 0.4, "$\mathbf{DOI}$", fontsize=FS + 2)
Ax2 = fig1.add_axes([0.56, 0, 0.44, 1])
Ax2.plot(XRX, 1e9 * T, "k")
Ax2.plot([xmin - dx, xmax + dx], [DOIt, DOIt], "r", ls="--", lw=3)
Ax2.set_xlim(xmin - dx, xmax + dx)
Ax2.set_ylim(1e9 * np.max(t), 0)
Ax2.set_xticks(np.linspace(-10, 10, 11))
plt.xticks(fontsize=FS)
plt.yticks(fontsize=FS)
plt.xlabel("X [m]", fontsize=FS + 4)
plt.ylabel("t [ns]", fontsize=FS + 4)
plt.show(fig1)
def fcnGetRicker(fc, t):
"""Compute Ricker wavelet for central operating frequency fc"""
A = (1 - 2 * (np.pi * fc * t) ** 2) * np.exp(-((np.pi * fc * t) ** 2))
return A
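# Illustrative check: the Ricker wavelet has unit peak amplitude at t = 0,
# e.g. fcnGetRicker(250e6, np.array([0.0])) -> array([1.])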
def fcnComputePointTravelTime(xp, dp, R, epsr, sig, fc, xrx):
"""Compute travel times for all zero-offset positions"""
# Compute Velocity
eps = epsr * 8.854e-12
# sig = 10**logsig
mu = 4 * np.pi * 1e-7
v = np.sqrt(2 / (mu * eps)) / np.sqrt(
np.sqrt(1 + (sig / (2 * np.pi * fc * eps)) ** 2) + 1
)
# Compute Travel Time
t = 2 * (np.sqrt((xrx - xp) ** 2 + dp ** 2) - R) / v
return t
def fcnComputeVelocity(epsr, sig, fc):
"""Compute propagation velocity"""
eps = epsr * 8.854e-12
# sig = 10**logsig
mu = 4 * np.pi * 1e-7
w = 2 * np.pi * fc
v = np.sqrt(2 / (mu * eps)) / np.sqrt(
np.sqrt(1 + (sig / (w * eps)) ** 2) + 1
)
return v
# import the necessary libraries
import numpy as np
import pandas as pd
# define the function to read the box boundary
def dimension(file):
f = open(file,'r')
content = f.readlines()
# stroring the each vertext point on the data list
data = []
v_info = []
vertices_data =[]
# cartesian_data =[]
# vt_p = []
for x in range(len(content)):
# checking the file content cartesian points or not
if "CARTESIAN_POINT" in content[x]:
d=content[x].replace(",","").split(" ")
# Storing the cartesian point (X,Y,Z)
cartesian_data=d[0],d[7],d[8],d[9]
data.append(cartesian_data)
# checking for the unit used in step file.
elif "LENGTH_UNIT" in content[x]:
d=content[x].replace(",","").split(" ")
length_unit = (d[11] +" "+ d[12]).replace(".","").title()
elif "VERTEX_POINT " in content[x]:
dt=content[x].replace(",","").split(" ")
vt_p=dt[0],dt[5]
v_info.append(vt_p)
else:
pass
df = pd.DataFrame (data, columns = ['Line_no','x','y','z'])
df = df.set_index("Line_no")
for value in range(len(v_info)):
x_p = df.at[v_info[value][1],'x']
y_p = df.at[v_info[value][1],'y']
z_p = df.at[v_info[value][1],'z']
Points = x_p,y_p,z_p
vertices_data.append(Points)
# storing all the vertices in np.array
vertices_data = np.array(vertices_data).astype(float)
# store the X, Y, Z minimum and maximum values
x_min=np.amin(vertices_data[:,0])
y_min=np.amin(vertices_data[:,1])
z_min=np.amin(vertices_data[:,2])
x_max=np.amax(vertices_data[:,0])
y_max=np.amax(vertices_data[:,1])
z_max = np.amax(vertices_data[:,2])
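# Plausible continuation (assumed): return the unit and the bounding box, e.g.
# return length_unit, (x_min, y_min, z_min), (x_max, y_max, z_max)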
import numpy as np
"""
Random samples from exponentially tilted stable distribution.
Converted from the corresponding function in the BNPGraph Matlab package by <NAME>
http://www.stats.ox.ac.uk/~caron/code/bnpgraph/index.html
Original reference from (Hofert, 2011).
samples = etstablernd(V0, alpha, tau, n) returns an n*1 vector of numbers
distributed from an exponentially tilted stable distribution with Laplace
transform (in z)
exp(-V0 * ((z + tau)^alpha - tau^alpha))
References:
- <NAME>. Random variate generation for exponentially and polynomially
tilted stable distributions. ACM Transactions on Modeling and Computer
Simulation, vol. 19(4), 2009.
- <NAME>. Sampling exponentially tilted stable distributions.
ACM Transactions on Modeling and Computer Simulation, vol. 22(1), 2011.
"""
def gen_U(w1, w2, w3, gamma):
V = np.random.random()
W_p = np.random.random()
if gamma >= 1:
if V < w1 / (w1 + w2):
U = np.abs(np.random.standard_normal()) / np.sqrt(gamma)
else:
U = np.pi * (1. - W_p ** 2.)
else:
if V < w3 / (w3 + w2):
U = np.pi * W_p
else:
U = np.pi * (1. - W_p ** 2)
return U
def sinc(x):
return np.sin(x) / x
def ratio_B(x, sigma):
return sinc(x) / (sinc(sigma * x)) ** sigma / (sinc((1. - sigma) * x)) ** (1 - sigma)
def zolotarev(u, sigma):
return ((np.sin(sigma * u)) ** sigma * (np.sin((1. - sigma) * u)) ** (1.0 - sigma) / np.sin(u)) ** (1.0 / (1.0 - sigma))
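# Sanity check of the exponent above (illustrative): for sigma = 0.5,
# zolotarev(np.pi / 2, 0.5) = (sin(pi/4)**0.5 * sin(pi/4)**0.5 / sin(pi/2))**2 = 0.5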
import os
import json
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import trange
from torchmetrics import Metric as TorchMetric
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorch_widedeep.metrics import Metric, MultipleMetrics
from pytorch_widedeep.wdtypes import * # noqa: F403
from pytorch_widedeep.callbacks import (
History,
Callback,
MetricCallback,
CallbackContainer,
LRShedulerCallback,
)
from pytorch_widedeep.utils.general_utils import Alias
from pytorch_widedeep.training._trainer_utils import (
save_epoch_logs,
print_loss_and_metric,
bayesian_alias_to_loss,
tabular_train_val_split,
)
from pytorch_widedeep.bayesian_models._base_bayesian_model import (
BaseBayesianModel,
)
class BayesianTrainer:
r"""Class to set the of attributes that will be used during the
training process.
Parameters
----------
model: ``BaseBayesianModel``
An object of class ``BaseBayesianModel``
objective: str
Defines the objective, loss or cost function.
Param aliases: ``loss_function``, ``loss_fn``, ``loss``,
``cost_function``, ``cost_fn``, ``cost``
Possible values are: 'binary', 'multiclass', 'regression'
custom_loss_function: ``nn.Module``, optional, default = None
object of class ``nn.Module``. If none of the loss functions
available suits the user, it is possible to pass a custom loss
function. See for example
:class:`pytorch_widedeep.losses.FocalLoss` for the required
structure of the object or the `Examples
<https://github.com/jrzaurin/pytorch-widedeep/tree/master/examples>`__
folder in the repo.
optimizer: ``Optimizer``, optional, default=None
An instance of Pytorch's ``Optimizer`` object
(e.g. :obj:`torch.optim.Adam()`). if no optimizer is passed it will
default to ``AdamW``.
lr_scheduler: ``LRScheduler``, optional, default=None
An instance of Pytorch's ``LRScheduler`` object (e.g
:obj:`torch.optim.lr_scheduler.StepLR(opt, step_size=5)`)
callbacks: List, optional, default=None
List with :obj:`Callback` objects. The three callbacks available in
``pytorch-widedeep`` are: ``LRHistory``, ``ModelCheckpoint`` and
``EarlyStopping``. The ``History`` and the ``LRShedulerCallback``
callbacks are used by default. This can also be a custom callback as
long as the object of type ``Callback``. See
:obj:`pytorch_widedeep.callbacks.Callback` or the `Examples
<https://github.com/jrzaurin/pytorch-widedeep/tree/master/examples>`__
folder in the repo
metrics: List, optional, default=None
- List of objects of type :obj:`Metric`. Metrics available are:
``Accuracy``, ``Precision``, ``Recall``, ``FBetaScore``,
``F1Score`` and ``R2Score``. This can also be a custom metric as
long as it is an object of type :obj:`Metric`. See
:obj:`pytorch_widedeep.metrics.Metric` or the `Examples
<https://github.com/jrzaurin/pytorch-widedeep/tree/master/examples>`__
folder in the repo
- List of objects of type :obj:`torchmetrics.Metric`. This can be any
metric from torchmetrics library `Examples
<https://torchmetrics.readthedocs.io/en/latest/references/modules.html#
classification-metrics>`_. This can also be a custom metric as
long as it is an object of type :obj:`Metric`. See `the instructions
<https://torchmetrics.readthedocs.io/en/latest/>`_.
verbose: int, default=1
Setting it to 0 will print nothing during training.
seed: int, default=1
Random seed to be used internally for train_test_split
Attributes
----------
cyclic_lr: bool
Attribute that indicates if the lr_scheduler is cyclic_lr
(i.e. ``CyclicLR`` or ``OneCycleLR``). See `Pytorch schedulers
<https://pytorch.org/docs/stable/optim.html>`_.
"""
@Alias( # noqa: C901
"objective",
["loss_function", "loss_fn", "loss", "cost_function", "cost_fn", "cost"],
)
def __init__(
self,
model: BaseBayesianModel,
objective: str,
custom_loss_function: Optional[Module] = None,
optimizer: Optimizer = None,
lr_scheduler: LRScheduler = None,
callbacks: Optional[List[Callback]] = None,
metrics: Optional[Union[List[Metric], List[TorchMetric]]] = None,
verbose: int = 1,
seed: int = 1,
**kwargs,
):
if objective not in ["binary", "multiclass", "regression"]:
raise ValueError(
"If 'custom_loss_function' is not None, 'objective' must be 'binary' "
"'multiclass' or 'regression', consistent with the loss function"
)
self.device, self.num_workers = self._set_device_and_num_workers(**kwargs)
self.model = model
self.early_stop = False
self.verbose = verbose
self.seed = seed
self.objective = objective
self.loss_fn = self._set_loss_fn(objective, custom_loss_function, **kwargs)
self.optimizer = (
optimizer
if optimizer is not None
else torch.optim.AdamW(self.model.parameters())
)
self.lr_scheduler = lr_scheduler
self._set_lr_scheduler_running_params(lr_scheduler, **kwargs)
self._set_callbacks_and_metrics(callbacks, metrics)
self.model.to(self.device)
def fit( # noqa: C901
self,
X_tab: np.ndarray,
target: np.ndarray,
X_tab_val: Optional[np.ndarray] = None,
target_val: Optional[np.ndarray] = None,
val_split: Optional[float] = None,
n_epochs: int = 1,
val_freq: int = 1,
batch_size: int = 32,
n_train_samples: int = 2,
n_val_samples: int = 2,
):
r"""Fit method.
Parameters
----------
X_tab: np.ndarray,
tabular dataset
target: np.ndarray
target values
X_tab_val: np.ndarray, Optional, default = None
validation data
target_val: np.ndarray, Optional, default = None
validation target values
val_split: float, Optional. default=None
An alternative to passing the validation set is to use a train/val
split fraction via 'val_split'
n_epochs: int, default=1
number of epochs
val_freq: int, default=1
validation frequency, in epochs
batch_size: int, default=32
batch size
n_train_samples: int, default=2
number of samples to average over during the training process.
n_val_samples: int, default=2
number of samples to average over during the validation process.
"""
self.batch_size = batch_size
train_set, eval_set = tabular_train_val_split(
self.seed, self.objective, X_tab, target, X_tab_val, target_val, val_split
)
train_loader = DataLoader(
dataset=train_set, batch_size=batch_size, num_workers=self.num_workers
)
train_steps = len(train_loader)
if eval_set is not None:
eval_loader = DataLoader(
dataset=eval_set,
batch_size=batch_size,
num_workers=self.num_workers,
shuffle=False,
)
eval_steps = len(eval_loader)
self.callback_container.on_train_begin(
{
"batch_size": batch_size,
"train_steps": train_steps,
"n_epochs": n_epochs,
}
)
for epoch in range(n_epochs):
epoch_logs: Dict[str, float] = {}
self.callback_container.on_epoch_begin(epoch, logs=epoch_logs)
self.train_running_loss = 0.0
with trange(train_steps, disable=self.verbose != 1) as t:
for batch_idx, (X, y) in zip(t, train_loader):
t.set_description("epoch %i" % (epoch + 1))
train_score, train_loss = self._train_step(
X, y, n_train_samples, train_steps, batch_idx
)
print_loss_and_metric(t, train_loss, train_score)
self.callback_container.on_batch_end(batch=batch_idx)
epoch_logs = save_epoch_logs(epoch_logs, train_loss, train_score, "train")
on_epoch_end_metric = None
if eval_set is not None and epoch % val_freq == (val_freq - 1):
self.callback_container.on_eval_begin()
self.valid_running_loss = 0.0
with trange(eval_steps, disable=self.verbose != 1) as v:
for i, (X, y) in zip(v, eval_loader):
v.set_description("valid")
val_score, val_loss = self._eval_step(
X, y, n_val_samples, train_steps, i
)
print_loss_and_metric(v, val_loss, val_score)
epoch_logs = save_epoch_logs(epoch_logs, val_loss, val_score, "val")
if self.reducelronplateau:
if self.reducelronplateau_criterion == "loss":
on_epoch_end_metric = val_loss
else:
on_epoch_end_metric = val_score[
self.reducelronplateau_criterion
]
self.callback_container.on_epoch_end(epoch, epoch_logs, on_epoch_end_metric)
if self.early_stop:
self.callback_container.on_train_end(epoch_logs)
break
self.callback_container.on_train_end(epoch_logs)
self._restore_best_weights()
self.model.train()
def predict( # type: ignore[return]
self,
X_tab: np.ndarray,
n_samples: int = 5,
return_samples: bool = False,
batch_size: int = 256,
) -> np.ndarray:
r"""Returns the predictions
Parameters
----------
X_tab: np.ndarray,
tabular dataset
n_samples: int, default=5
number of samples that will be either returned or averaged to
produce an overall prediction
return_samples: bool, default = False
Boolean indicating whether the n samples will be averaged or directly returned
batch_size: int, default = 256
batch size
"""
preds_l = self._predict(X_tab, n_samples, return_samples, batch_size)
preds = np.hstack(preds_l) if return_samples else np.vstack(preds_l)
axis = 2 if return_samples else 1
if self.objective == "regression":
return preds.squeeze(axis)
if self.objective == "binary":
return (preds.squeeze(axis) > 0.5).astype("int")
if self.objective == "multiclass":
return np.argmax(preds, axis)
def predict_proba( # type: ignore[return]
self,
X_tab: np.ndarray,
n_samples: int = 5,
return_samples: bool = False,
batch_size: int = 256,
) -> np.ndarray:
r"""Returns the predicted probabilities
Parameters
----------
X_tab: np.ndarray,
tabular dataset
n_samples: int, default=5
number of samples that will be either returned or averaged to
produce an overall prediction
return_samples: bool, default = False
Boolean indicating whether the n samples will be averaged or directly returned
batch_size: int, default = 256
batch size
"""
preds_l = self._predict(X_tab, n_samples, return_samples, batch_size)
preds = np.hstack(preds_l) if return_samples else np.vstack(preds_l)
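# Plausible remainder (assumed, mirroring the `predict` method above): squeeze
# the samples axis and return the probabilities; for the binary objective this
# would typically be np.c_[1.0 - preds, preds].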
'''
Created on Sep 27, 2011
@author: sean
'''
from __future__ import print_function
from opencl import get_platforms, Context, Queue, Program, DeviceMemoryView, empty
from opencl import ContextProperties, global_memory, UserEvent, Event
from opencl.kernel import parse_args
import opencl as cl
import unittest
import ctypes
import numpy as np
from threading import Event as PyEvent
import sys
import os
ctx = None
DEVICE_TYPE = None
def setUpModule():
global ctx, DEVICE_TYPE
DEVICE_TYPE_ATTR = os.environ.get('DEVICE_TYPE', 'DEFAULT')
DEVICE_TYPE = getattr(cl.Device, DEVICE_TYPE_ATTR)
ctx = cl.Context(device_type=DEVICE_TYPE)
print(ctx.devices)
source = """
__kernel void generate_sin(__global float2* a, float scale)
{
int id = get_global_id(0);
int n = get_global_size(0);
float r = (float)id / (float)n;
a[id].x = id;
a[id].y = native_sin(r) * scale;
}
"""
class Test(unittest.TestCase):
def test_platform_constructor(self):
with self.assertRaises(Exception):
cl.Platform()
def test_device_constructor(self):
with self.assertRaises(Exception):
cl.Device()
def test_get_platforms(self):
platforms = get_platforms()
def test_get_devices(self):
plat = get_platforms()[0]
devices = plat.devices
native_kernels = [dev.has_native_kernel for dev in devices]
def test_enqueue_native_kernel_refcount(self):
if not ctx.devices[0].has_native_kernel:
self.skipTest("Device does not support native kernels")
queue = Queue(ctx, ctx.devices[0])
def incfoo():
pass
self.assertEqual(sys.getrefcount(incfoo), 2)
e = cl.UserEvent(ctx)
queue.enqueue_wait_for_events(e)
queue.enqueue_native_kernel(incfoo)
self.assertEqual(sys.getrefcount(incfoo), 3)
e.complete()
queue.finish()
self.assertEqual(sys.getrefcount(incfoo), 2)
def test_enqueue_native_kernel(self):
if not ctx.devices[0].has_native_kernel:
self.skipTest("Device does not support native kernels")
queue = Queue(ctx, ctx.devices[0])
global foo
foo = 0
def incfoo(arg, op=lambda a, b: 0):
global foo
foo = op(foo, arg)
queue.enqueue_native_kernel(incfoo, 4, op=lambda a, b: a + b)
queue.enqueue_native_kernel(incfoo, 3, op=lambda a, b: a * b)
queue.finish()
self.assertEqual(foo, 12)
#
# def test_native_kernel_maps_args(self):
#
# if not ctx.devices[0].has_native_kernel:
# self.skipTest("Device does not support native kernels")
#
# queue = Queue(ctx, ctx.devices[0])
# a = cl.empty(ctx, [10], 'f')
#
#
# global foo
#
# foo = 0
#
# def incfoo(arg):
# global foo
#
# print 'arg', arg
#
# print "queue.enqueue_native_kernel"
# queue.enqueue_native_kernel(incfoo, a)
#
# print "queue.finish"
# queue.finish()
#
# print "self.assertEqual"
# self.assertEqual(foo, 12)
class TestDevice(unittest.TestCase):
def _test_device_properties(self):
device = ctx.devices[0]
print("device_type", device.device_type)
print("name", device.name)
print("has_image_support", device.has_image_support)
print("has_native_kernel", device.has_native_kernel)
print("max_compute_units", device.max_compute_units)
print("max_work_item_dimension", device.max_work_item_dimensions)
print("max_work_item_sizes", device.max_work_item_sizes)
print("max_work_group_size", device.max_work_group_size)
print("max_clock_frequency", device.max_clock_frequency, 'MHz')
print("address_bits", device.address_bits, 'bits')
print("max_read_image_args", device.max_read_image_args)
print("max_write_image_args", device.max_write_image_args)
print("max_image2d_shape", device.max_image2d_shape)
print("max_image3d_shape", device.max_image3d_shape)
print("max_parameter_size", device.max_parameter_size, 'bytes')
print("max_const_buffer_size", device.max_const_buffer_size, 'bytes')
print("has_local_mem", device.has_local_mem)
print("local_mem_size", device.local_mem_size, 'bytes')
print("host_unified_memory", device.host_unified_memory)
print("available", device.available)
print("compiler_available", device.compiler_available)
print("driver_version", device.driver_version)
print("device_profile", device.profile)
print("version", device.version)
print("extensions", device.extensions)
class TestContext(unittest.TestCase):
def test_properties(self):
platform = get_platforms()[0]
properties = ContextProperties()
properties.platform = platform
self.assertEqual(platform.name, properties.platform.name)
ctx = Context(device_type=DEVICE_TYPE, properties=properties)
class TestProgram(unittest.TestCase):
def test_program(self):
program = Program(ctx, source=source)
program.build()
def test_source(self):
program = Program(ctx, source=source)
self.assertEqual(program.source, source)
def test_binaries(self):
program = Program(ctx, source=source)
self.assertEqual(program.binaries, dict.fromkeys(ctx.devices))
program.build()
binaries = program.binaries
self.assertIsNotNone(binaries[ctx.devices[0]])
self.assertEqual(len(binaries[ctx.devices[0]]), program.binary_sizes[0])
program2 = Program(ctx, binaries=binaries)
self.assertIsNone(program2.source)
self.assertEqual(program2.binaries, binaries)
def test_constructor(self):
with self.assertRaises(TypeError):
Program(None, binaries=None)
with self.assertRaises(TypeError):
Program(ctx, binaries={None:None})
def test_devices(self):
program = Program(ctx, source=source)
program.build()
class TestKernel(unittest.TestCase):
def test_name(self):
program = Program(ctx, source=source)
program.build()
generate_sin = program.kernel('generate_sin')
self.assertEqual(generate_sin.name, 'generate_sin')
def test_argtypes(self):
program = Program(ctx, source=source)
program.build()
generate_sin = program.kernel('generate_sin')
generate_sin.argtypes = [DeviceMemoryView, ctypes.c_float]
with self.assertRaises(TypeError):
generate_sin.argtypes = [DeviceMemoryView, ctypes.c_float, ctypes.c_float]
def test_set_args(self):
program = Program(ctx, source=source)
program.build()
generate_sin = program.kernel('generate_sin')
generate_sin.argtypes = [global_memory(), ctypes.c_float]
buf = empty(ctx, [10], ctype=cl.cl_float2)
queue = Queue(ctx, ctx.devices[0])
generate_sin.set_args(buf, 1.0)
queue.enqueue_nd_range_kernel(generate_sin, 1, global_work_size=[buf.size])
expected = np.zeros([10], dtype=[('x', np.float32), ('y', np.float32)])
expected['x'] = np.arange(10)
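# Plausible completion of this check (assumed): the kernel writes
# a[id].y = native_sin(id / n) * scale, so with n = 10 and scale = 1.0
# expected['y'] = np.sin(np.arange(10, dtype=np.float32) / 10.0)
# followed by reading `buf` back and comparing elementwise.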
# Author: <NAME>
# License: BSD
import numpy as np
from seglearn.datasets import load_watch
from seglearn.base import TS_Data
def test_ts_data():
# time series data
ts = np.array([np.random.rand(100, 10), np.random.rand(200, 10), np.random.rand(20, 10)],
              dtype=object)  # ragged series lengths require an object array
c = np.random.rand(3, 10)
data = TS_Data(ts, c)
assert np.array_equal(data.context_data, c)
assert np.array_equal(data.ts_data, ts)
assert isinstance(data[1], TS_Data)
assert np.array_equal(data[1].ts_data, ts[1])
assert np.array_equal(data[1].context_data, c[1])
# segmented time series data
sts = np.random.rand(100, 10, 6)
c = np.random.rand(100, 6)
data = TS_Data(sts, c)
assert isinstance(data[4:10], TS_Data)
assert np.array_equal(data[4:10].ts_data, sts[4:10])
assert np.array_equal(data[4:10].context_data, c[4:10])
sts = np.random.rand(100, 10)
c = np.random.rand(100)
data = TS_Data(sts, c)
assert isinstance(data[4:10], TS_Data)
assert np.array_equal(data[4:10].ts_data, sts[4:10])
assert np.array_equal(data[4:10].context_data, c[4:10])
"""Tests for functions that calculate plasma parameters."""
import numpy as np
import pytest
from astropy import units as u
from astropy.constants import c, e, m_e, m_p, mu0
from astropy.tests.helper import assert_quantity_allclose
from plasmapy.formulary.parameters import (
Alfven_speed,
betaH_,
Bohm_diffusion,
cs_,
cwp_,
DB_,
Debye_length,
Debye_number,
gyrofrequency,
gyroradius,
Hall_parameter,
inertial_length,
ion_sound_speed,
kappa_thermal_speed,
lambdaD_,
lower_hybrid_frequency,
magnetic_energy_density,
magnetic_pressure,
mass_density,
nD_,
oc_,
plasma_frequency,
pmag_,
pth_,
rc_,
rho_,
rhoc_,
thermal_pressure,
thermal_speed,
ub_,
upper_hybrid_frequency,
va_,
vth_,
vth_kappa_,
wc_,
wlh_,
wp_,
wuh_,
)
from plasmapy.particles.exceptions import InvalidParticleError
from plasmapy.utils.exceptions import (
PhysicsError,
PhysicsWarning,
RelativityError,
RelativityWarning,
)
from plasmapy.utils.pytest_helpers import assert_can_handle_nparray
B = 1.0 * u.T
Z = 1
ion = "p"
m_i = m_p
n_i = 5e19 * u.m ** -3
n_e = Z * 5e19 * u.m ** -3
rho = n_i * m_i + n_e * m_e
T_e = 1e6 * u.K
T_i = 1e6 * u.K
k_1 = 3e1 * u.m ** -1
k_2 = 3e7 * u.m ** -1
B_arr = np.array([0.001, 0.002]) * u.T
B_nanarr = np.array([0.001, np.nan]) * u.T
B_allnanarr = np.array([np.nan, np.nan]) * u.T
rho_arr = np.array([5e-10, 2e-10]) * u.kg / u.m ** 3
rho_infarr = np.array([np.inf, 5e19]) * u.m ** -3
rho_negarr = np.array([-5e19, 6e19]) * u.m ** -3
T_arr = np.array([1e6, 2e6]) * u.K
T_nanarr = np.array([1e6, np.nan]) * u.K
T_nanarr2 = np.array([np.nan, 2e6]) * u.K
T_allnanarr = np.array([np.nan, np.nan]) * u.K
T_negarr = np.array([1e6, -5151.0]) * u.K
V = 25.2 * u.m / u.s
V_arr = np.array([25, 50]) * u.m / u.s
V_nanarr = np.array([25, np.nan]) * u.m / u.s
V_allnanarr = np.array([np.nan, np.nan]) * u.m / u.s
mu = m_p.to(u.u).value
class Test_mass_density:
r"""Test the mass_density function in parameters.py."""
def test_particleless(self):
with pytest.raises(ValueError):
mass_density(1 * u.m ** -3)
def test_wrong_units(self):
with pytest.raises(u.UnitTypeError):
mass_density(1 * u.J)
def test_handle_nparrays(self):
"""Test for ability to handle numpy array quantities"""
assert_can_handle_nparray(mass_density)
# Assertions below that are in CGS units with 2-3 significant digits
# are generally from the NRL Plasma Formulary.
def test_Alfven_speed():
r"""Test the Alfven_speed function in parameters.py."""
# TODO: break this test up into multiple tests
assert np.isclose(
Alfven_speed(1 * u.T, 1e-8 * u.kg * u.m ** -3).value,
8920620.580763856,
rtol=1e-6,
)
V_A = Alfven_speed(B, n_i)
assert np.isclose(V_A.value, (B / np.sqrt(mu0 * n_i * (m_p + m_e))).si.value)
assert Alfven_speed(B, rho) == Alfven_speed(B, n_i)
assert Alfven_speed(B, rho).unit.is_equivalent(u.m / u.s)
assert Alfven_speed(B, rho) == Alfven_speed(-B, rho)
assert Alfven_speed(B, 4 * rho) == 0.5 * Alfven_speed(B, rho)
assert Alfven_speed(2 * B, rho) == 2 * Alfven_speed(B, rho)
# Case when Z=1 is assumed
with pytest.warns(RelativityWarning):
assert np.isclose(
Alfven_speed(5 * u.T, 5e19 * u.m ** -3, ion="H+"),
Alfven_speed(5 * u.T, 5e19 * u.m ** -3, ion="p"),
atol=0 * u.m / u.s,
rtol=1e-3,
)
# Case where magnetic field and density are Quantity arrays
V_A_arr = Alfven_speed(B_arr, rho_arr)
V_A_arr0 = Alfven_speed(B_arr[0], rho_arr[0])
V_A_arr1 = Alfven_speed(B_arr[1], rho_arr[1])
assert np.isclose(V_A_arr0.value, V_A_arr[0].value)
assert np.isclose(V_A_arr1.value, V_A_arr[1].value)
# Case where magnetic field is an array but density is a scalar Quantity
V_A_arr = Alfven_speed(B_arr, rho)
V_A_arr0 = Alfven_speed(B_arr[0], rho)
V_A_arr1 = Alfven_speed(B_arr[1], rho)
assert np.isclose(V_A_arr0.value, V_A_arr[0].value)
assert np.isclose(V_A_arr1.value, V_A_arr[1].value)
with pytest.raises(ValueError):
Alfven_speed(np.array([5, 6, 7]) * u.T, np.array([5, 6]) * u.m ** -3)
assert np.isnan(Alfven_speed(B_nanarr, rho_arr)[1])
with pytest.raises(ValueError):
Alfven_speed(B_arr, rho_negarr)
with pytest.raises(u.UnitTypeError):
Alfven_speed(5 * u.A, n_i, ion="p")
with pytest.raises(TypeError):
Alfven_speed(B, 5, ion="p")
with pytest.raises(u.UnitsError):
Alfven_speed(B, 5 * u.m ** -2, ion="p")
with pytest.raises(InvalidParticleError):
Alfven_speed(B, n_i, ion="spacecats")
with pytest.warns(RelativityWarning): # relativistic
Alfven_speed(5e1 * u.T, 5e19 * u.m ** -3, ion="p")
with pytest.raises(RelativityError): # super-relativistic
Alfven_speed(5e8 * u.T, 5e19 * u.m ** -3, ion="p")
with pytest.raises(ValueError):
Alfven_speed(0.001 * u.T, -5e19 * u.m ** -3, ion="p")
assert np.isnan(Alfven_speed(np.nan * u.T, 1 * u.m ** -3, ion="p"))
assert np.isnan(Alfven_speed(1 * u.T, np.nan * u.m ** -3, ion="p"))
with pytest.raises(RelativityError):
assert Alfven_speed(np.inf * u.T, 1 * u.m ** -3, ion="p") == np.inf * u.m / u.s
with pytest.raises(RelativityError):
assert Alfven_speed(-np.inf * u.T, 1 * u.m ** -3, ion="p") == np.inf * u.m / u.s
with pytest.warns(u.UnitsWarning):
assert Alfven_speed(1.0, n_i) == Alfven_speed(1.0 * u.T, n_i)
Alfven_speed(1 * u.T, 5e19 * u.m ** -3, ion="p")
# testing for user input z_mean
testMeth1 = Alfven_speed(1 * u.T, 5e19 * u.m ** -3, ion="p", z_mean=0.8).si.value
testTrue1 = 3084015.75214846
errStr = f"Alfven_speed() gave {testMeth1}, should be {testTrue1}."
assert np.isclose(testMeth1, testTrue1, atol=0.0, rtol=1e-6), errStr
assert_can_handle_nparray(Alfven_speed)
def test_ion_sound_speed():
r"""Test the ion_sound_speed function in parameters.py."""
assert np.isclose(
ion_sound_speed(
T_i=1.3232 * u.MK, T_e=1.831 * u.MK, ion="p", gamma_e=1, gamma_i=3
).value,
218816.06086407552,
)
assert np.isclose(
ion_sound_speed(
T_i=1.3232 * u.MK,
T_e=1.831 * u.MK,
n_e=n_e,
k=k_1,
ion="p",
gamma_e=1,
gamma_i=3,
).value,
218816.06086407552,
)
assert np.isclose(
ion_sound_speed(
T_i=1.3232 * u.MK,
T_e=1.831 * u.MK,
n_e=n_e,
k=k_2,
ion="p",
gamma_e=1,
gamma_i=3,
).value,
552.3212936293337,
)
assert np.isclose(
ion_sound_speed(
T_i=0.88 * u.MK,
T_e=1.28 * u.MK,
n_e=n_e,
k=0 * u.m ** -1,
ion="p",
gamma_e=1.2,
gamma_i=3.4,
).value,
193328.52857788358,
)
# case when Z=1 is assumed
# assert ion_sound_speed(T_i=T_i, T_e=T_e, ion='p+') == ion_sound_speed(T_i=T_i, T_e=T_e,
# ion='H-1')
assert ion_sound_speed(
T_i=T_i, T_e=0 * u.K, n_e=n_e, k=k_1, ion="p+"
).unit.is_equivalent(u.m / u.s)
with pytest.raises(RelativityError):
ion_sound_speed(T_i=T_i, T_e=T_e, n_e=n_e, k=k_1, gamma_i=np.inf)
with pytest.warns(PhysicsWarning):
ion_sound_speed(T_i=T_i, T_e=T_e, n_e=n_e)
with pytest.warns(PhysicsWarning):
ion_sound_speed(T_i=T_i, T_e=T_e, k=k_1)
with pytest.raises(u.UnitTypeError):
ion_sound_speed(
T_i=np.array([5, 6, 5]) * u.K,
T_e=np.array([3, 4])
"""
=================
Linear Regression
=================
In this tutorial, we are going to demonstrate how to use the ``abess`` package to carry out best subset selection
in linear regression with both simulated data and real data.
"""
###############################################################################
#
# Our package ``abess`` implements a polynomial algorithm in the following best-subset selection problem:
#
# .. math::
# \min_{\beta\in \mathbb{R}^p} \frac{1}{2n} ||y-X\beta||^2_2,\quad \text{s.t.}\ ||\beta||_0\leq s,
#
#
# where :math:`\| \cdot \|_2` is the :math:`\ell_2` norm, :math:`\|\beta\|_0=\sum_{i=1}^pI( \beta_i\neq 0)`
# is the :math:`\ell_0` norm of :math:`\beta`, and the sparsity level :math:`s`
# is an unknown non-negative integer to be determined.
# Next, we present an example to show the ``abess`` package can get an optimal estimation.
#
# Toward optimality: adaptive best-subset selection
# ^^^^^^^^^^^^^^^^^^^^^^
#
# Synthetic dataset
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We generate a design matrix :math:`X` containing :math:`n = 300` observations and each observation has :math:`p = 1000` predictors.
# The response variable :math:`y` is linearly related to the first, second, and fifth predictors in :math:`X`:
#
# .. math::
# y = 3X_1 + 1.5X_2 + 2X_5 + \epsilon,
#
# where :math:`\epsilon` is a standard normal random variable.
import numpy as np
from abess.datasets import make_glm_data
np.random.seed(0)
n = 300
p = 1000
true_support_set = [0, 1, 4]
true_coef = np.array([3, 1.5, 2])
real_coef = np.zeros(p)
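###############################################################################
# What follows is an illustrative sketch of how such a simulation can be run
# with ``abess``; the exact ``make_glm_data`` arguments and the ``data.x`` /
# ``data.y`` attribute names are assumptions, not taken from the text above.
real_coef[true_support_set] = true_coef
data = make_glm_data(n=n, p=p, k=len(true_support_set), family="gaussian",
                     coef_=real_coef)

from abess import LinearRegression

model = LinearRegression()
model.fit(data.x, data.y)
print(np.nonzero(model.coef_)[0])  # ideally recovers the support {0, 1, 4}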
"""Wrapper functions with boilerplate code for making plots the way I like them
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import map
from builtins import range
from past.utils import old_div
import matplotlib
import matplotlib.patheffects as pe
import numpy as np, warnings
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy.stats
from . import misc
import wwutils
import pandas
def alpha_blend_with_mask(rgb0, rgb1, alpha0, mask0):
"""Alpha-blend two RGB images, masking out one image.
rgb0 : first image, to be masked
Must be 3-dimensional, and rgb0.shape[-1] must be 3 or 4
If rgb0.shape[-1] == 4, the 4th channel will be dropped
rgb1 : second image, wil not be masked
Must be 3-dimensional, and rgb1.shape[-1] must be 3 or 4
If rgb1.shape[-1] == 4, the 4th channel will be dropped
Then, must have same shape as rgb0
alpha0 : the alpha to apply to rgb0. (1 - alpha0) will be applied to rgb1.
mask0 : True where to ignore rgb0
Must have dimension 2 or 3
If 2-dimensional, will be replicated along the channel dimension
Then, must have same shape as rgb0
Returns : array of same shape as rgb0 and rgb1
Where mask0 is True, the result is the same as rgb1
Where mask0 is False, the result is rgb0 * alpha0 + rgb1 * (1 - alpha0)
"""
# Replicate mask along color channel if necessary
if mask0.ndim == 2:
mask0 = np.stack([mask0] * 3, axis=-1)
# Check 3-dimensional
assert mask0.ndim == 3
assert rgb0.ndim == 3
assert rgb1.ndim == 3
# Drop alpha if present
if rgb0.shape[-1] == 4:
rgb0 = rgb0[:, :, :3]
if rgb1.shape[-1] == 4:
rgb1 = rgb1[:, :, :3]
if mask0.shape[-1] == 4:
mask0 = mask0[:, :, :3]
# Error check
assert rgb0.shape == rgb1.shape
assert mask0.shape == rgb0.shape
# Blend
blended = alpha0 * rgb0 + (1 - alpha0) * rgb1
# Flatten to apply mask
blended_flat = blended.flatten()
mask_flat = mask0.flatten()
replace_with = rgb1.flatten()
# Masked replace
blended_flat[mask_flat] = replace_with[mask_flat]
# Reshape to original
replaced_blended = blended_flat.reshape(blended.shape)
# Return
return replaced_blended
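# Illustrative usage sketch (not part of the original module): the masked
# quadrant takes rgb1 exactly, while the rest is alpha-blended 50/50.
def _demo_alpha_blend_with_mask():
    rgb0 = np.zeros((4, 4, 3))
    rgb0[..., 0] = 1.0  # pure red image
    rgb1 = np.zeros((4, 4, 3))
    rgb1[..., 2] = 1.0  # pure blue image
    mask0 = np.zeros((4, 4), dtype=bool)
    mask0[:2, :2] = True  # ignore rgb0 in the top-left quadrant
    blended = alpha_blend_with_mask(rgb0, rgb1, alpha0=0.5, mask0=mask0)
    assert np.allclose(blended[0, 0], [0.0, 0.0, 1.0])  # masked: rgb1 only
    assert np.allclose(blended[3, 3], [0.5, 0.0, 0.5])  # 50/50 blend
    return blended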
def custom_RdBu_r():
"""Custom RdBu_r colormap with true white at center"""
# Copied from matplotlib source: lib/matplotlib/_cm.py
# And adjusted to go to true white at center
_RdBu_data = (
(0.40392156862745099, 0.0 , 0.12156862745098039),
(0.69803921568627447, 0.09411764705882353, 0.16862745098039217),
(0.83921568627450982, 0.37647058823529411, 0.30196078431372547),
(0.95686274509803926, 0.6470588235294118 , 0.50980392156862742),
(0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ),
(1,1,1),#(0.96862745098039216, 0.96862745098039216, 0.96862745098039216),
(0.81960784313725488, 0.89803921568627454, 0.94117647058823528),
(0.5725490196078431 , 0.77254901960784317, 0.87058823529411766),
(0.2627450980392157 , 0.57647058823529407, 0.76470588235294112),
(0.12941176470588237, 0.4 , 0.67450980392156867),
(0.0196078431372549 , 0.18823529411764706, 0.38039215686274508)
)
# Copied from matplotlib source: lib/matplotlib/cm.py
myrdbu = matplotlib.colors.LinearSegmentedColormap.from_list(
'myrdbu', _RdBu_data[::-1], matplotlib.rcParams['image.lut'])
# Return
return myrdbu
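# Illustrative usage sketch (not part of the original module): zero-centered
# data rendered with the custom map, so values near zero appear true white.
def _demo_custom_RdBu_r():
    data = np.random.randn(8, 8)
    f, ax = plt.subplots()
    im = ax.imshow(data, cmap=custom_RdBu_r(), vmin=-3, vmax=3)
    f.colorbar(im, ax=ax)
    return f, ax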
def smooth_and_plot_versus_depth(
data,
colname,
ax=None,
NS_sigma=40,
RS_sigma=20,
n_depth_bins=101,
depth_min=0,
depth_max=1600,
datapoint_plot_kwargs=None,
smoothed_plot_kwargs=None,
plot_layer_boundaries=True,
layer_boundaries_ylim=None,
):
"""Plot individual datapoints and smoothed versus depth.
data : DataFrame
Must have columns "Z_corrected", "NS", and `colname`, which become
x- and y- coordinates.
colname : string
Name of column containing data
ax : Axis, or None
if None, creates ax
NS_sigma, RS_sigma : float
The standard deviations of the smoothing kernel applied to the NS and RS data, respectively
depth_min, depth_max, n_depth_bins : float, float, int
The x-coordinates at which the smoothed results are evaluated
datapoint_plot_kwargs : dict
Plot kwargs for individual data points.
Defaults:
'marker': 'o', 'ls': 'none', 'ms': 1, 'mew': 1, 'alpha': .3, 'mfc': 'none'
smoothed_plot_kwargs : dict
Plot kwargs for smoothed line.
Defaults: 'lw': 1.5, 'path_effects': path_effects
plot_layer_boundaries: bool
If True, plot layer boundaries
layer_boundaries_ylim : tuple of length 2, or None
If not None, layer boundaries are plotted to these ylim
If None, ax.get_ylim() is used after plotting everything else
Returns: ax
"""
## Set up defaults
# Bins at which to evaluate smoothed
depth_bins = np.linspace(depth_min, depth_max, n_depth_bins)
# datapoint_plot_kwargs
default_datapoint_plot_kwargs = {
'marker': 'o', 'ls': 'none', 'ms': 1, 'mew': 1,
'alpha': .3, 'mfc': 'none',
}
if datapoint_plot_kwargs is not None:
default_datapoint_plot_kwargs.update(datapoint_plot_kwargs)
use_datapoint_plot_kwargs = default_datapoint_plot_kwargs
# smoothed_plot_kwargs
path_effects = [pe.Stroke(linewidth=3, foreground='k'), pe.Normal()]
default_smoothed_plot_kwargs = {
'lw': 1.5,
'path_effects': path_effects,
}
if smoothed_plot_kwargs is not None:
default_smoothed_plot_kwargs.update(smoothed_plot_kwargs)
use_smoothed_plot_kwargs = default_smoothed_plot_kwargs
## Plot versus depth
if ax is None:
f, ax = plt.subplots()
# Iterate over NS
for NS, sub_data in data.groupby('NS'):
if NS:
color = 'b'
sigma = NS_sigma
else:
color = 'r'
sigma = RS_sigma
# Get the data to smooth
to_smooth = sub_data.set_index('Z_corrected')[colname]
# Smooth
smoothed = wwutils.misc.gaussian_sum_smooth_pandas(
to_smooth, depth_bins, sigma=sigma)
# Plot the individual data points
ax.plot(
to_smooth.index,
to_smooth.values,
color=color,
zorder=0,
**use_datapoint_plot_kwargs,
)
# Plot the smoothed
ax.plot(
smoothed, color=color,
**use_smoothed_plot_kwargs)
## Pretty
wwutils.plot.despine(ax)
ax.set_xticks((0, 500, 1000, 1500))
ax.set_xlim((0, 1500))
ax.set_xticklabels(('0.0', '0.5', '1.0', '1.5'))
ax.set_xlabel('depth in cortex (mm)')
## Add layer boundaries
if plot_layer_boundaries:
# ylim for the boundaries
if layer_boundaries_ylim is None:
layer_boundaries_ylim = ax.get_ylim()
# Layer boundaries
layer_boundaries = [128, 419, 626, 1006, 1366]
layer_names = ['L1', 'L2/3', 'L4', 'L5', 'L6', 'L6b']
# Centers of layers (for naming)
layer_depth_bins = np.concatenate(
[[-50], layer_boundaries, [1500]]).astype(float)
layer_centers = (layer_depth_bins[:-1] + layer_depth_bins[1:]) / 2.0
# Adjust position of L2/3 and L6 slightly
layer_centers[1] = layer_centers[1] - 50
layer_centers[2] = layer_centers[2] + 10
layer_centers[3] = layer_centers[3] + 25
layer_centers[-2] = layer_centers[-2] + 50
# Plot each (but not top of L1 or bottom of L6)
for lb in layer_boundaries[1:-1]:
ax.plot(
[lb, lb], layer_boundaries_ylim,
color='gray', lw=.8, zorder=-1)
# Set the boundaries tight
ax.set_ylim(layer_boundaries_ylim)
# Warn
if data[colname].max() > layer_boundaries_ylim[1]:
print(
"warning: max datapoint {} ".format(data[colname].max()) +
"greater than layer_boundaries_ylim[1]")
if data[colname].min() < layer_boundaries_ylim[0]:
print(
"warning: min datapoint {} ".format(data[colname].min()) +
"less than layer_boundaries_ylim[0]")
# Label the layer names
# x in data, y in figure
blended_transform = matplotlib.transforms.blended_transform_factory(
ax.transData, ax.figure.transFigure)
# Name each (but not L1 or L6b)
zobj = zip(layer_names[1:-1], layer_centers[1:-1])
for layer_name, layer_center in zobj:
ax.text(
layer_center, .98, layer_name,
ha='center', va='center', size=12, transform=blended_transform)
## Return ax
return ax
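# Illustrative usage sketch (not part of the original module); the values
# below are synthetic, only the column names follow the docstring above.
def _demo_smooth_and_plot_versus_depth():
    n = 200
    df = pandas.DataFrame({
        'Z_corrected': np.random.uniform(0, 1500, n),
        'NS': np.random.rand(n) < 0.3,
        'firing_rate': np.random.exponential(5.0, n),
    })
    ax = smooth_and_plot_versus_depth(df, 'firing_rate')
    ax.set_ylabel('firing rate (Hz)')
    return ax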
def plot_by_depth_and_layer(df, column, combine_layer_5=True, aggregate='median',
ax=None, ylim=None, agg_plot_kwargs=None, point_alpha=.5, point_ms=3,
layer_label_offset=-.1, agg_plot_meth='rectangle'):
"""Plot values by depth and layer
df : DataFrame
Should have columns 'Z_corrected', 'layer', 'NS', and `column`
column : name of column in `df` to plot
combine_layer_5 : whether to combine 5a and 5b
aggregate : None, 'mean', or 'median'
ax : where to plot
ylim : desired ylim (affects layer name position)
agg_plot_kwargs : how to plot aggregated
"""
# Set agg_plot_kwargs
default_agg_plot_kwargs = {'marker': '_', 'ls': 'none', 'ms': 16,
'mew': 4, 'alpha': .5}
if agg_plot_kwargs is not None:
default_agg_plot_kwargs.update(agg_plot_kwargs)
agg_plot_kwargs = default_agg_plot_kwargs
# Layer boundaries
layer_boundaries = [128, 419, 626, 1006, 1366]
layer_names = ['L1', 'L2/3', 'L4', 'L5', 'L6', 'L6b']
layer_depth_bins = np.concatenate([[-50], layer_boundaries, [1500]]).astype(float)
layer_centers = (layer_depth_bins[:-1] + layer_depth_bins[1:]) / 2.0
# Make a copy
df = df.copy()
# Optionally combine layers 5a and 5b
if combine_layer_5:
# Combine layers 5a and 5b
df['layer'] = df['layer'].astype(str)
df.loc[df['layer'].isin(['5a', '5b']), 'layer'] = '5'
# Optionally create figure
if ax is None:
f, ax = plt.subplots(figsize=(4.5, 3.5))
# Plot datapoints for NS and RS separately
NS_l = [False, True]
for NS, sub_df in df.groupby('NS'):
# Color by NS
color = 'b' if NS else 'r'
# Plot raw data
ax.plot(
sub_df.loc[:, 'Z_corrected'].values,
sub_df.loc[:, column].values,
color=color, marker='o', mfc='white',
ls='none', alpha=point_alpha, ms=point_ms, clip_on=False,
)
# Keep track of this
if ylim is None:
ylim = ax.get_ylim()
# Plot aggregates of NS and RS separately
if aggregate is not None:
for NS, sub_df in df.groupby('NS'):
# Color by NS
color = 'b' if NS else 'r'
# Aggregate over bins
gobj = sub_df.groupby('layer')[column]
counts_by_bin = gobj.size()
# Aggregate
if aggregate == 'mean':
agg_by_bin = gobj.mean()
elif aggregate == 'median':
agg_by_bin = gobj.median()
else:
raise ValueError("unrecognized aggregated method: {}".format(aggregate))
# Block out aggregates with too few data points
agg_by_bin[counts_by_bin <= 3] = np.nan
# Reindex to ensure this matches layer_centers
# TODO: Make this match the way it was aggregated
agg_by_bin = agg_by_bin.reindex(['1', '2/3', '4', '5', '6', '6b'])
assert len(agg_by_bin) == len(layer_centers)
if agg_plot_meth == 'markers':
# Plot aggregates as individual markers
ax.plot(
layer_centers,
agg_by_bin.values,
color=color,
**agg_plot_kwargs
)
elif agg_plot_meth == 'rectangle':
# Plot aggregates as a rectangle
for n_layer, layer in enumerate(['2/3', '4', '5', '6']):
lo_depth = layer_depth_bins[n_layer + 1]
hi_depth = layer_depth_bins[n_layer + 2]
value = agg_by_bin.loc[layer]
#~ ax.plot([lo_depth, hi_depth], [value, value],
#~ color='k', ls='-', lw=2.5)
#~ ax.plot([lo_depth, hi_depth], [value, value],
#~ color=color, ls='--', lw=2.5)
# zorder brings the patch on top of the datapoints
patch = plt.Rectangle(
(lo_depth + .1 * (hi_depth - lo_depth), value),
width=((hi_depth-lo_depth) * .8),
height=(.03 * (ylim[1] - ylim[0])),
ec='k', fc=color, alpha=.5, lw=1.5, zorder=20)
ax.add_patch(patch)
# Plot layer boundaries, skipping L1 and L6b
for lb in layer_boundaries[1:-1]:
ax.plot([lb, lb], [ylim[0], ylim[1]], color='gray', ls='-', lw=1)
# Name the layers
text_ypos = ylim[1] + layer_label_offset * (ylim[1] - ylim[0])
for layer_name, layer_center in zip(layer_names, layer_centers):
if layer_name in ['L1', 'L6b']:
continue
ax.text(layer_center, text_ypos, layer_name[1:], ha='center', va='bottom',
color='k')
# Reset the ylim
ax.set_ylim(ylim)
# xticks
ax.set_xticks((200, 600, 1000, 1400))
ax.set_xticklabels([])
ax.set_xlim((100, 1500))
wwutils.plot.despine(ax)
ax.set_xlabel('depth in cortex')
return ax
def connected_pairs(v1, v2, p=None, signif=None, shapes=None, colors=None,
labels=None, ax=None):
"""Plot columns of (v1, v2) as connected pairs"""
import wwutils.stats
if ax is None:
f, ax = plt.subplots()
# Arrayify
v1 = np.asarray(v1)
v2 = np.asarray(v2)
if signif is None:
signif = np.zeros_like(v1)
else:
signif = np.asarray(signif)
# Defaults
if shapes is None:
shapes = ['o'] * v1.shape[0]
if colors is None:
colors = ['k'] * v1.shape[0]
if labels is None:
labels = [''] * v1.shape[1]
# Store location of each pair
xvals = []
xvalcenters = []
# Iterate over columns
for n, (col1, col2, signifcol, label) in enumerate(zip(v1.T, v2.T, signif.T, labels)):
# Where to plot this pair
x1 = n * 2
x2 = n * 2 + 1
xvals += [x1, x2]
xvalcenters.append(np.mean([x1, x2]))
# Iterate over specific pairs
for val1, val2, sigval, shape, color in zip(col1, col2, signifcol, shapes, colors):
lw = 2 if sigval else 0.5
ax.plot([x1, x2], [val1, val2], marker=shape, color=color,
ls='-', mec=color, mfc='none', lw=lw)
# Plot the median
median1 = np.median(col1[~np.isnan(col1)])
median2 = np.median(col2[~np.isnan(col2)])
ax.plot([x1, x2], [median1, median2], marker='o', color='k', ls='-',
mec=color, mfc='none', lw=4)
# Sigtest on pop
utest_res = wwutils.stats.r_utest(col1[~np.isnan(col1)], col2[~np.isnan(col2)],
paired='TRUE', fix_float=1e6)
if utest_res['p'] < 0.05:
ax.text(np.mean([x1, x2]), 1.0, '*', va='top', ha='center')
# Label center of each pair
ax.set_xlim([xvals[0]-1, xvals[-1] + 1])
if labels:
ax.set_xticks(xvalcenters)
ax.set_xticklabels(labels)
return ax, xvals
def radar_by_stim(evoked_resp, ax=None, label_stim=True):
"""Given a df of spikes by stim, plot radar
evoked_resp should have arrays of counts indexed by all the stimulus
names
"""
from ns5_process import LBPB
if ax is None:
f, ax = plt.subplots(figsize=(3, 3), subplot_kw={'polar': True})
# Heights of the bars
evoked_resp = evoked_resp.loc[LBPB.mixed_stimnames]
barmeans = evoked_resp.apply(np.mean)
barstderrs = evoked_resp.apply(misc.sem)
# Set up the radar
radar_dists = [[barmeans[sname+block]
for sname in ['ri_hi', 'le_hi', 'le_lo', 'ri_lo']]
for block in ['_lc', '_pc']]
# make it circular
circle_meansLB = np.array(radar_dists[0] + [radar_dists[0][0]])
circle_meansPB = np.array(radar_dists[1] + [radar_dists[1][0]])
circle_errsLB = np.array([barstderrs[sname+'_lc'] for sname in
['ri_hi', 'le_hi', 'le_lo', 'ri_lo', 'ri_hi']])
circle_errsPB = np.array([barstderrs[sname+'_pc'] for sname in
['ri_hi', 'le_hi', 'le_lo', 'ri_lo', 'ri_hi']])
# x-values (really theta values)
xts = np.array([45, 135, 225, 315, 405])*np.pi/180.0
# Plot LB means and errs
#ax.errorbar(xts, circle_meansLB, circle_errsLB, color='b')
ax.plot(xts, circle_meansLB, color='b')
ax.fill_between(x=xts, y1=circle_meansLB-circle_errsLB,
y2=circle_meansLB+circle_errsLB, color='b', alpha=.5)
# Plot PB means and errs
ax.plot(xts, circle_meansPB, color='r')
ax.fill_between(x=xts, y1=circle_meansPB-circle_errsPB,
y2=circle_meansPB+circle_errsPB, color='r', alpha=.5)
# Tick labels
xtls = ['right\nhigh', 'left\nhigh', 'left\nlow', 'right\nlow']
ax.set_xticks(xts)
ax.set_xticklabels([]) # if xtls, will overlap
ax.set_yticks(ax.get_ylim()[1:])
ax.set_yticks([])
# manual tick
if label_stim:
for xt, xtl in zip(xts, xtls):
ax.text(xt, ax.get_ylim()[1]*1.25, xtl, size='large',
ha='center', va='center')
# pretty and save
#f.tight_layout()
return ax
def despine(ax, detick=True, which_ticks='both', which=('right', 'top')):
"""Remove the top and right axes from the plot
which_ticks : can be 'major', 'minor', or 'both'
"""
for w in which:
ax.spines[w].set_visible(False)
if detick:
ax.tick_params(which=which_ticks, **{w:False})
return ax
def font_embed():
"""Produce files that can be usefully imported into AI"""
# For PDF imports:
# Not sure what this does
matplotlib.rcParams['ps.useafm'] = True
# Makes it so that the text is editable
matplotlib.rcParams['pdf.fonttype'] = 42
# For SVG imports:
# AI can edit the text but can't import the font itself
#matplotlib.rcParams['svg.fonttype'] = 'svgfont'
# seems to work better
matplotlib.rcParams['svg.fonttype'] = 'none'
def manuscript_defaults():
"""For putting into a word document.
Typical figure is approx 3"x3" panels. Apply a 50% scaling.
I think these defaults should be 14pt, actually.
"""
matplotlib.rcParams['font.sans-serif'] = 'Arial'
matplotlib.rcParams['axes.labelsize'] = 14
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['xtick.labelsize'] = 14
matplotlib.rcParams['ytick.labelsize'] = 14
matplotlib.rcParams['font.size'] = 14 # ax.text objects
matplotlib.rcParams['legend.fontsize'] = 14
def poster_defaults():
"""For a poster
Title: 80pt
Section headers: 60pt
Body text: 40pt
Axis labels, tick marks, subplot titles: 32pt
Typical panel size: 6"
So it's easiest to just use manuscript_defaults() and double
the size.
"""
matplotlib.rcParams['font.sans-serif'] = 'Arial'
matplotlib.rcParams['axes.labelsize'] = 14
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['xtick.labelsize'] = 14
matplotlib.rcParams['ytick.labelsize'] = 14
matplotlib.rcParams['font.size'] = 14 # ax.text objects
matplotlib.rcParams['legend.fontsize'] = 14
def presentation_defaults():
"""For importing into presentation.
Typical figure is 11" wide and 7" tall. No scaling should be necessary.
Typically presentation figures have more whitespace and fewer panels
than manuscript figures.
Actually I think the font size should not be below 18, unless really
necessary.
"""
matplotlib.rcParams['font.sans-serif'] = 'Arial'
matplotlib.rcParams['axes.labelsize'] = 18
matplotlib.rcParams['axes.titlesize'] = 18
matplotlib.rcParams['xtick.labelsize'] = 18
matplotlib.rcParams['ytick.labelsize'] = 18
matplotlib.rcParams['font.size'] = 18 # ax.text objects
matplotlib.rcParams['legend.fontsize'] = 18
def figure_1x1_small():
"""Smaller f, ax for single panel with a nearly square axis
"""
f, ax = plt.subplots(figsize=(2.2, 2))
# left = .3 is for the case of yticklabels with two signif digits
f.subplots_adjust(bottom=.28, left=.3, right=.95, top=.95)
return f, ax
def figure_1x1_square():
"""Standard size f, ax for single panel with a square axis
Room for xlabel, ylabel, and title in 16pt font
"""
f, ax = plt.subplots(figsize=(3, 3))
f.subplots_adjust(bottom=.23, left=.26, right=.9, top=.87)
return f, ax
def figure_1x1_standard():
"""Standard size f, ax for single panel with a slightly rectangular axis
Room for xlabel, ylabel, and title in 16pt font
"""
f, ax = plt.subplots(figsize=(3, 2.5))
f.subplots_adjust(bottom=.24, left=.26, right=.93, top=.89)
return f, ax
def figure_1x2_standard(**kwargs):
"""Standard size f, ax for single panel with a slightly rectangular axis
Room for xlabel, ylabel, and title in 16pt font
"""
f, axa = plt.subplots(1, 2, figsize=(6, 2.5), **kwargs)
f.subplots_adjust(left=.15, right=.9, wspace=.2, bottom=.22, top=.85)
return f, axa
def figure_1x2_small(**kwargs):
f, axa = plt.subplots(1, 2, figsize=(4, 2), **kwargs)
f.subplots_adjust(left=.2, right=.975, wspace=.3, bottom=.225, top=.8)
return f, axa
def rescue_tick(ax=None, f=None, x=3, y=3):
# Determine what axes to process
if ax is not None:
ax_l = [ax]
elif f is not None:
ax_l = f.axes
else:
raise ValueError("either ax or f must not be None")
# Iterate over axes to process
for ax in ax_l:
if x is not None:
ax.xaxis.set_major_locator(plt.MaxNLocator(x))
if y is not None:
ax.yaxis.set_major_locator(plt.MaxNLocator(y))
def crucifix(x, y, xerr=None, yerr=None, relative_CIs=False, p=None,
ax=None, factor=None, below=None, above=None, null=None,
data_range=None, axtype=None, zero_substitute=1e-6,
suppress_null_error_bars=False):
"""Crucifix plot y vs x around the unity line
x, y : array-like, length N, paired data
xerr, yerr : array-like, Nx2, confidence intervals around x and y
relative_CIs : if True, then add x to xerr (and ditto yerr)
p : array-like, length N, p-values for each point
ax : graphical object
factor : multiply x, y, and errors by this value
below : dict of point specs for points significantly below the line
above : dict of point specs for points significantly above the line
null : dict of point specs for points nonsignificant
data_range : re-adjust the data limits to this
axtype : if 'symlog' then set axes to symlog
"""
# Set up point specs
if below is None:
below = {'color': 'b', 'marker': '.', 'ls': '-', 'alpha': 1.0,
'mec': 'b', 'mfc': 'b'}
if above is None:
above = {'color': 'r', 'marker': '.', 'ls': '-', 'alpha': 1.0,
'mec': 'r', 'mfc': 'r'}
if null is None:
null = {'color': 'gray', 'marker': '.', 'ls': '-', 'alpha': 0.5,
'mec': 'gray', 'mfc': 'gray'}
# Defaults for data range
if data_range is None:
data_range = [None, None]
else:
data_range = list(data_range)
# Convert to array and optionally multiply
if factor is None:
factor = 1
x = np.asarray(x) * factor
y = np.asarray(y) * factor
# p-values
if p is not None:
p = np.asarray(p)
# Same with errors but optionally also reshape and recenter
if xerr is not None:
xerr = np.asarray(xerr) * factor
if xerr.ndim == 1:
xerr = np.array([-xerr, xerr]).T
if relative_CIs:
xerr += x[:, None]
if yerr is not None:
yerr = np.asarray(yerr) * factor
if yerr.ndim == 1:
yerr = np.array([-yerr, yerr]).T
if relative_CIs:
yerr += y[:, None]
# Create figure handles
if ax is None:
f = plt.figure()
ax = f.add_subplot(111)
# Plot each point
min_value, max_value = [], []
for n, (xval, yval) in enumerate(zip(x, y)):
# Get p-value and error bars for this point
pval = 1.0 if p is None else p[n]
xerrval = xerr[n] if xerr is not None else None
yerrval = yerr[n] if yerr is not None else None
# Replace neginfs
if xerrval is not None:
xerrval[xerrval == 0] = zero_substitute
if yerrval is not None:
yerrval[yerrval == 0] = zero_substitute
# What color
if pval < .05 and yval < xval:
pkwargs = below
elif pval < .05 and yval > xval:
pkwargs = above
else:
pkwargs = null
lkwargs = pkwargs.copy()
lkwargs.pop('marker')
# Now actually plot the point
ax.plot([xval], [yval], **pkwargs)
# plot error bars, keep track of data range
if xerrval is not None and not (suppress_null_error_bars and pkwargs is null):
ax.plot(xerrval, [yval, yval], **lkwargs)
max_value += list(xerrval)
else:
max_value.append(xval)
# same for y
if yerrval is not None and not (suppress_null_error_bars and pkwargs is null):
ax.plot([xval, xval], yerrval, **lkwargs)
max_value += list(yerrval)
else:
max_value.append(yval)
# Plot the unity line
if data_range[0] is None:
data_range[0] = np.min(max_value)
if data_range[1] is None:
data_range[1] = np.max(max_value)
ax.plot(data_range, data_range, 'k:')
ax.set_xlim(data_range)
ax.set_ylim(data_range)
# symlog
if axtype:
ax.set_xscale(axtype)
ax.set_yscale(axtype)
ax.axis('scaled')
return ax
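# Illustrative usage sketch (not part of the original module) with made-up
# paired measurements and relative confidence intervals.
def _demo_crucifix():
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([1.5, 1.4, 3.6])
    xerr = np.array([[-0.2, 0.2], [-0.3, 0.3], [-0.2, 0.2]])
    yerr = np.array([[-0.2, 0.2], [-0.3, 0.3], [-0.2, 0.2]])
    p = np.array([0.01, 0.20, 0.03])  # points 1 and 3 are significant
    ax = crucifix(x, y, xerr=xerr, yerr=yerr, relative_CIs=True, p=p)
    ax.set_xlabel('baseline')
    ax.set_ylabel('treatment')
    return ax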
def scatter_with_trend(x, y, xname='X', yname='Y', ax=None,
legend_font_size='medium', **kwargs):
"""Scatter plot `y` vs `x`, also linear regression line
Kwargs sent to the point plotting
"""
if 'marker' not in kwargs:
kwargs['marker'] = '.'
if 'ls' not in kwargs:
kwargs['ls'] = ''
if 'color' not in kwargs:
kwargs['color'] = 'g'
x = np.asarray(x)
"""Probability distributions and auxiliary functions to deal with them."""
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d, RegularGridInterpolator
import scipy.signal
import scipy.optimize
import math
from flavio.math.functions import normal_logpdf, normal_pdf
from flavio.statistics.functions import confidence_level
import warnings
import inspect
from collections import OrderedDict
import yaml
import re
def _camel_to_underscore(s):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def string_to_class(string):
"""Get a ProbabilityDistribution subclass from a string. This can
either be the class name itself or a string in underscore format
as returned from `class_to_string`."""
try:
return eval(string)
except NameError:
pass
for c in ProbabilityDistribution.get_subclasses():
if c.class_to_string() == string:
return c
raise NameError("Distribution " + string + " not found.")
class ProbabilityDistribution(object):
"""Common base class for all probability distributions"""
def __init__(self, central_value, support):
self.central_value = central_value
self.support = support
@classmethod
def get_subclasses(cls):
"""Return all subclasses (including subclasses of subclasses)."""
for subclass in cls.__subclasses__():
yield from subclass.get_subclasses()
yield subclass
def get_central(self):
return self.central_value
@property
def error_left(self):
"""Return the lower error"""
return self.get_error_left()
@property
def error_right(self):
"""Return the upper error"""
return self.get_error_right()
@classmethod
def class_to_string(cls):
"""Get a string name for a given ProbabilityDistribution subclass.
This converts camel case to underscore and removes the word
'distribution'.
Example: class_to_string(AsymmetricNormalDistribution) returns
'asymmetric_normal'.
"""
name = _camel_to_underscore(cls.__name__)
return name.replace('_distribution', '')
def get_dict(self, distribution=False, iterate=False, arraytolist=False):
"""Get an ordered dictionary with arguments and values needed to
the instantiate the distribution.
Optional arguments (default to False):
- `distribution`: add a 'distribution' key to the dictionary with the
value being the string representation of the distribution's name
(e.g. 'asymmetric_normal').
- `iterate`: If ProbabilityDistribution instances are among the
arguments (e.g. for KernelDensityEstimate), return the instance's
get_dict instead of the instance as value.
- `arraytolist`: convert numpy arrays to lists
"""
args = inspect.signature(self.__class__).parameters.keys()
d = self.__dict__
od = OrderedDict()
if distribution:
od['distribution'] = self.class_to_string()
od.update(OrderedDict((a, d[a]) for a in args))
if iterate:
for k in od:
if isinstance(od[k], ProbabilityDistribution):
od[k] = od[k].get_dict(distribution=True)
if arraytolist:
for k in od:
if isinstance(od[k], np.ndarray):
od[k] = od[k].tolist()
if isinstance(od[k], list):
for i, x in enumerate(od[k]):
if isinstance(x, np.ndarray):
od[k][i] = od[k][i].tolist()
for k in od:
if isinstance(od[k], np.integer):
    od[k] = int(od[k])
elif isinstance(od[k], np.floating):
    od[k] = float(od[k])
if isinstance(od[k], list):
    for i, x in enumerate(od[k]):
        if isinstance(x, np.floating):
            od[k][i] = float(od[k][i])
        elif isinstance(x, np.integer):
            od[k][i] = int(od[k][i])
return od
def get_yaml(self, *args, **kwargs):
"""Get a YAML string representing the dictionary returned by the
get_dict method.
Arguments will be passed to `yaml.dump`."""
od = self.get_dict(distribution=True, iterate=True, arraytolist=True)
return yaml.dump(od, *args, **kwargs)
def delta_logpdf(self, x, **kwargs):
exclude = kwargs.get('exclude', None)
if exclude is not None:
d = len(self.central_value)
cv = [self.central_value[i] for i in range(d) if i not in exclude]
else:
cv = self.central_value
return self.logpdf(x, **kwargs) - self.logpdf(cv, **kwargs)
class UniformDistribution(ProbabilityDistribution):
"""Distribution with constant PDF in a range and zero otherwise."""
def __init__(self, central_value, half_range):
"""Initialize the distribution.
Parameters:
- central_value: arithmetic mean of the upper and lower range boundaries
- half_range: half the difference of upper and lower range boundaries
Example:
central_value = 5 and half_range = 3 leads to the range [2, 8].
"""
self.half_range = half_range
self.range = (central_value - half_range,
central_value + half_range)
super().__init__(central_value, support=self.range)
def __repr__(self):
return 'flavio.statistics.probability.UniformDistribution' + \
'({}, {})'.format(self.central_value, self.half_range)
def get_random(self, size=None):
return np.random.uniform(self.range[0], self.range[1], size)
def _logpdf(self, x):
if x < self.range[0] or x >= self.range[1]:
return -np.inf
else:
return -math.log(2 * self.half_range)
def logpdf(self, x):
_lpvect = np.vectorize(self._logpdf)
return _lpvect(x)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
return confidence_level(nsigma) * self.half_range
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
return confidence_level(nsigma) * self.half_range
class DeltaDistribution(ProbabilityDistribution):
"""Delta Distrubution that is non-vanishing only at a single point."""
def __init__(self, central_value):
"""Initialize the distribution.
Parameters:
- central_value: point where the PDF does not vanish.
"""
super().__init__(central_value, support=(central_value, central_value))
def __repr__(self):
return 'flavio.statistics.probability.DeltaDistribution' + \
'({})'.format(self.central_value)
def get_random(self, size=None):
if size is None:
return self.central_value
else:
return self.central_value * np.ones(size)
def logpdf(self, x):
if np.ndim(x) == 0:
if x == self.central_value:
return 0.
else:
return -np.inf
y = -np.inf*np.ones(np.asarray(x).shape)
y[np.asarray(x) == self.central_value] = 0
return y
def get_error_left(self, *args, **kwargs):
return 0
def get_error_right(self, *args, **kwargs):
return 0
class NormalDistribution(ProbabilityDistribution):
"""Univariate normal or Gaussian distribution."""
def __init__(self, central_value, standard_deviation):
"""Initialize the distribution.
Parameters:
- central_value: location (mode and mean)
- standard_deviation: standard deviation
"""
super().__init__(central_value,
support=(central_value - 6 * standard_deviation,
central_value + 6 * standard_deviation))
if standard_deviation <= 0:
raise ValueError("Standard deviation must be positive number")
self.standard_deviation = standard_deviation
def __repr__(self):
return 'flavio.statistics.probability.NormalDistribution' + \
'({}, {})'.format(self.central_value, self.standard_deviation)
def get_random(self, size=None):
return np.random.normal(self.central_value, self.standard_deviation, size)
def logpdf(self, x):
return normal_logpdf(x, self.central_value, self.standard_deviation)
def pdf(self, x):
return normal_pdf(x, self.central_value, self.standard_deviation)
def cdf(self, x):
return scipy.stats.norm.cdf(x, self.central_value, self.standard_deviation)
def ppf(self, x):
return scipy.stats.norm.ppf(x, self.central_value, self.standard_deviation)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
return nsigma * self.standard_deviation
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
return nsigma * self.standard_deviation
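# Illustrative sketch (not part of the original module) of the common
# ProbabilityDistribution interface, using NormalDistribution as an example.
def _demo_probability_distribution_api():
    dist = NormalDistribution(central_value=0.0, standard_deviation=0.5)
    samples = dist.get_random(size=1000)  # 1000 random draws
    # for a Gaussian, the 1-sigma errors equal the standard deviation
    assert dist.error_left == 0.5 and dist.error_right == 0.5
    # log-PDF at the mode: -log(sigma * sqrt(2*pi))
    assert np.isclose(dist.logpdf(0.0), -np.log(0.5 * np.sqrt(2 * np.pi)))
    # the defining arguments can be serialized back out
    assert dist.get_dict() == {'central_value': 0.0, 'standard_deviation': 0.5}
    return samples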
class LogNormalDistribution(ProbabilityDistribution):
"""Univariate log-normal distribution."""
def __init__(self, central_value, factor):
r"""Initialize the distribution.
Parameters:
- central_value: median of the distribution (neither mode nor mean!).
Can be positive or negative, but must be nonzero.
- factor: must be larger than 1. 68% of the probability will be between
`central_value * factor` and `central_value / factor`.
The mean and standard deviation of the underlying normal distribution
correspond to `log(abs(central_value))` and `log(factor)`, respectively.
Example:
`LogNormalDistribution(central_value=3, factor=2)`
corresponds to the distribution of the exponential of a normally
distributed variable with mean ln(3) and standard deviation ln(2).
68% of the probability is within 6=3*2 and 1.5=4/2.
"""
if central_value == 0:
raise ValueError("Central value must not be zero")
if factor <= 1:
raise ValueError("Factor must be bigger than 1")
self.factor = factor
self.log_standard_deviation = np.log(factor)
self.log_central_value = math.log(abs(central_value))
if central_value < 0:
self.central_sign = -1
slim = math.exp(math.log(abs(central_value))
- 6 * self.log_standard_deviation)
super().__init__(central_value,
support=(slim, 0))
else:
self.central_sign = +1
slim = math.exp(math.log(abs(central_value))
+ 6 * self.log_standard_deviation)
super().__init__(central_value,
support=(0, slim))
def __repr__(self):
return 'flavio.statistics.probability.LogNormalDistribution' + \
'({}, {})'.format(self.central_value, self.factor)
def get_random(self, size=None):
s = self.central_sign
return s * np.random.lognormal(self.log_central_value, self.log_standard_deviation, size)
def logpdf(self, x):
s = self.central_sign
return scipy.stats.lognorm.logpdf(s * x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def pdf(self, x):
s = self.central_sign
return scipy.stats.lognorm.pdf(s * x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def cdf(self, x):
if self.central_sign == -1:
return 1 - scipy.stats.lognorm.cdf(-x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
else:
return scipy.stats.lognorm.cdf(x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def ppf(self, x):
if self.central_sign == -1:
return -scipy.stats.lognorm.ppf(1 - x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
else:
return scipy.stats.lognorm.ppf(x, scale=np.exp(self.log_central_value), s=self.log_standard_deviation)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
cl = confidence_level(nsigma)
return self.central_value - self.ppf(0.5 - cl/2.)
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
cl = confidence_level(nsigma)
return self.ppf(0.5 + cl/2.) - self.central_value
class AsymmetricNormalDistribution(ProbabilityDistribution):
"""An asymmetric normal distribution obtained by gluing together two
half-Gaussians and demanding the PDF to be continuous."""
def __init__(self, central_value, right_deviation, left_deviation):
"""Initialize the distribution.
Parameters:
- central_value: mode of the distribution (not equal to its mean!)
- right_deviation: standard deviation of the upper half-Gaussian
- left_deviation: standard deviation of the lower half-Gaussian
"""
super().__init__(central_value,
support=(central_value - 6 * left_deviation,
central_value + 6 * right_deviation))
if right_deviation <= 0 or left_deviation <= 0:
raise ValueError(
"Left and right standard deviations must be positive numbers")
self.right_deviation = right_deviation
self.left_deviation = left_deviation
self.p_right = normal_pdf(
self.central_value, self.central_value, self.right_deviation)
self.p_left = normal_pdf(
self.central_value, self.central_value, self.left_deviation)
def __repr__(self):
return 'flavio.statistics.probability.AsymmetricNormalDistribution' + \
'({}, {}, {})'.format(self.central_value,
self.right_deviation,
self.left_deviation)
def get_random(self, size=None):
if size is None:
return self._get_random()
else:
return np.array([self._get_random() for i in range(size)])
def _get_random(self):
r = np.random.uniform()
a = abs(self.left_deviation /
(self.right_deviation + self.left_deviation))
if r > a:
x = abs(np.random.normal(0, self.right_deviation))
return self.central_value + x
else:
x = abs(np.random.normal(0, self.left_deviation))
return self.central_value - x
def _logpdf(self, x):
# values of the PDF at the central value
if x < self.central_value:
# left-hand side: scale factor
r = 2 * self.p_right / (self.p_left + self.p_right)
return math.log(r) + normal_logpdf(x, self.central_value, self.left_deviation)
else:
# right-hand side: scale factor
r = 2 * self.p_left / (self.p_left + self.p_right)
return math.log(r) + normal_logpdf(x, self.central_value, self.right_deviation)
def logpdf(self, x):
_lpvect = np.vectorize(self._logpdf)
return _lpvect(x)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
return nsigma * self.left_deviation
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
return nsigma * self.right_deviation
class HalfNormalDistribution(ProbabilityDistribution):
"""Half-normal distribution with zero PDF above or below the mode."""
def __init__(self, central_value, standard_deviation):
"""Initialize the distribution.
Parameters:
- central_value: mode of the distribution.
- standard_deviation:
If positive, the PDF is zero below central_value and (twice) that of
a Gaussian with this standard deviation above.
If negative, the PDF is zero above central_value and (twice) that of
a Gaussian with standard deviation equal to abs(standard_deviation)
below.
"""
super().__init__(central_value,
support=sorted((central_value,
central_value + 6 * standard_deviation)))
if standard_deviation == 0:
raise ValueError("Standard deviation must be non-zero number")
self.standard_deviation = standard_deviation
def __repr__(self):
return 'flavio.statistics.probability.HalfNormalDistribution' + \
'({}, {})'.format(self.central_value, self.standard_deviation)
def get_random(self, size=None):
return self.central_value + np.sign(self.standard_deviation) * abs(np.random.normal(0, abs(self.standard_deviation), size))
def _logpdf(self, x):
if np.sign(self.standard_deviation) * (x - self.central_value) < 0:
return -np.inf
else:
return math.log(2) + normal_logpdf(x, self.central_value, abs(self.standard_deviation))
def logpdf(self, x):
_lpvect = np.vectorize(self._logpdf)
return _lpvect(x)
def cdf(self, x):
if np.sign(self.standard_deviation) == -1:
return 1 - scipy.stats.halfnorm.cdf(-x,
loc=-self.central_value,
scale=-self.standard_deviation)
else:
return scipy.stats.halfnorm.cdf(x,
loc=self.central_value,
scale=self.standard_deviation)
def ppf(self, x):
if np.sign(self.standard_deviation) == -1:
return -scipy.stats.halfnorm.ppf(1 - x,
loc=-self.central_value,
scale=-self.standard_deviation)
else:
return scipy.stats.halfnorm.ppf(x,
loc=self.central_value,
scale=self.standard_deviation)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
if self.standard_deviation >= 0:
return 0
else:
return nsigma * (-self.standard_deviation) # return a positive value!
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
if self.standard_deviation <= 0:
return 0
else:
return nsigma * self.standard_deviation
class GaussianUpperLimit(HalfNormalDistribution):
"""Upper limit defined as a half-normal distribution."""
def __init__(self, limit, confidence_level):
"""Initialize the distribution.
Parameters:
- limit: value of the upper limit
- confidence_level: confidence_level of the upper limit. Float between
0 and 1.
"""
if confidence_level > 1 or confidence_level < 0:
raise ValueError("Confidence level should be between 0 und 1")
if limit <= 0:
raise ValueError("The upper limit should be a positive number")
super().__init__(central_value=0,
standard_deviation=self.get_standard_deviation(limit, confidence_level))
self.limit = limit
self.confidence_level = confidence_level
def __repr__(self):
return 'flavio.statistics.probability.GaussianUpperLimit' + \
'({}, {})'.format(self.limit, self.confidence_level)
def get_standard_deviation(self, limit, confidence_level):
"""Convert the confidence level into a Gaussian standard deviation"""
return limit / scipy.stats.norm.ppf(0.5 + confidence_level / 2.)
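# Illustrative sketch (not part of the original module): a 95% C.L. upper
# limit of 2.0 yields a half-Gaussian whose CDF reaches 0.95 at the limit.
def _demo_gaussian_upper_limit():
    dist = GaussianUpperLimit(limit=2.0, confidence_level=0.95)
    assert np.isclose(float(dist.cdf(2.0)), 0.95)
    # the standard deviation follows from limit / Phi^{-1}(0.5 + CL/2)
    assert np.isclose(dist.standard_deviation,
                      2.0 / scipy.stats.norm.ppf(0.975))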
class GammaDistribution(ProbabilityDistribution):
r"""A Gamma distribution defined like the `gamma` distribution in
`scipy.stats` (with parameters `a`, `loc`, `scale`).
The `central_value` attribute returns the location of the mode.
"""
def __init__(self, a, loc, scale):
if loc > 0:
raise ValueError("loc must be negative or zero")
# "frozen" scipy distribution object
self.scipy_dist = scipy.stats.gamma(a=a, loc=loc, scale=scale)
mode = loc + (a-1)*scale
# support extends until the CDF is roughly "6 sigma"
support_limit = self.scipy_dist.ppf(1-2e-9)
super().__init__(central_value=mode, # the mode
support=(loc, support_limit))
self.a = a
self.loc = loc
self.scale = scale
def __repr__(self):
return 'flavio.statistics.probability.GammaDistribution' + \
'({}, {}, {})'.format(self.a, self.loc, self.scale)
def get_random(self, size):
return self.scipy_dist.rvs(size=size)
def cdf(self, x):
return self.scipy_dist.cdf(x)
def ppf(self, x):
return self.scipy_dist.ppf(x)
def logpdf(self, x):
return self.scipy_dist.logpdf(x)
def _find_error_cdf(self, confidence_level):
# find the value of the CDF at the position of the left boundary
# of the `confidence_level`% CL range by demanding that the value
# of the PDF is the same at the two boundaries
def x_left(a):
return self.ppf(a)
def x_right(a):
return self.ppf(a + confidence_level)
def diff_logpdf(a):
logpdf_x_left = self.logpdf(x_left(a))
logpdf_x_right = self.logpdf(x_right(a))
return logpdf_x_left - logpdf_x_right
return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level-1e-6)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
a = self._find_error_cdf(confidence_level(nsigma))
return self.central_value - self.ppf(a)
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
a = self._find_error_cdf(confidence_level(nsigma))
return self.ppf(a + confidence_level(nsigma)) - self.central_value
class GammaDistributionPositive(ProbabilityDistribution):
r"""A Gamma distribution defined like the `gamma` distribution in
`scipy.stats` (with parameters `a`, `loc`, `scale`), but restricted to
positive values for x and correspondingly rescaled PDF.
The `central_value` attribute returns the location of the mode.
"""
def __init__(self, a, loc, scale):
if loc > 0:
raise ValueError("loc must be negative or zero")
# "frozen" scipy distribution object (without restricting x>0!)
self.scipy_dist = scipy.stats.gamma(a=a, loc=loc, scale=scale)
mode = loc + (a-1)*scale
if mode < 0:
mode = 0
# support extends until the CDF is roughly "6 sigma", assuming x>0
support_limit = self.scipy_dist.ppf(1-2e-9*(1-self.scipy_dist.cdf(0)))
super().__init__(central_value=mode, # the mode
support=(0, support_limit))
self.a = a
self.loc = loc
self.scale = scale
# scale factor for PDF to account for x>0
self._pdf_scale = 1/(1 - self.scipy_dist.cdf(0))
def __repr__(self):
return 'flavio.statistics.probability.GammaDistributionPositive' + \
'({}, {}, {})'.format(self.a, self.loc, self.scale)
def get_random(self, size=None):
if size is None:
    # redraw single values until one non-negative draw is obtained
    r = self._get_random(size=1)
    while len(r) == 0:
        r = self._get_random(size=1)
    return r[0]
else:
# some iteration necessary as discarding negative values
# might lead to too small size
r = np.array([], dtype=float)
while len(r) < size:
r = np.concatenate((r, self._get_random(size=2*size)))
return r[:size]
def _get_random(self, size):
r = self.scipy_dist.rvs(size=size)
return r[(r >= 0)]
def cdf(self, x):
cdf0 = self.scipy_dist.cdf(0)
cdf = (self.scipy_dist.cdf(x) - cdf0)/(1-cdf0)
return np.piecewise(
np.asarray(x, dtype=float),
[x<0, x>=0],
[0., cdf]) # return 0 for negative x
def ppf(self, x):
cdf0 = self.scipy_dist.cdf(0)
return self.scipy_dist.ppf((1-cdf0)*x + cdf0)
def logpdf(self, x):
# return -inf for negative x values
inf0 = np.piecewise(np.asarray(x, dtype=float), [x<0, x>=0], [-np.inf, 0.])
return inf0 + self.scipy_dist.logpdf(x) + np.log(self._pdf_scale)
def _find_error_cdf(self, confidence_level):
# find the value of the CDF at the position of the left boundary
# of the `confidence_level`% CL range by demanding that the value
# of the PDF is the same at the two boundaries
def x_left(a):
return self.ppf(a)
def x_right(a):
return self.ppf(a + confidence_level)
def diff_logpdf(a):
logpdf_x_left = self.logpdf(x_left(a))
logpdf_x_right = self.logpdf(x_right(a))
return logpdf_x_left - logpdf_x_right
return scipy.optimize.brentq(diff_logpdf, 0, 1 - confidence_level-1e-6)
def get_error_left(self, nsigma=1, **kwargs):
"""Return the lower error"""
if self.logpdf(0) > self.logpdf(self.ppf(confidence_level(nsigma))):
# look at a one-sided 1 sigma range. If the PDF at 0
# is smaller than the PDF at the boundary of this range, it means
# that the left-hand error is not meaningful to define.
return self.central_value
else:
a = self._find_error_cdf(confidence_level(nsigma))
return self.central_value - self.ppf(a)
def get_error_right(self, nsigma=1, **kwargs):
"""Return the upper error"""
one_sided_error = self.ppf(confidence_level(nsigma))
if self.logpdf(0) > self.logpdf(one_sided_error):
# look at a one-sided 1 sigma range. If the PDF at 0
# is smaller than the PDF at the boundary of this range, return the
# boundary of the range as the right-hand error
return one_sided_error
else:
a = self._find_error_cdf(confidence_level(nsigma))
return self.ppf(a + confidence_level(nsigma)) - self.central_value
class GammaUpperLimit(GammaDistributionPositive):
r"""Gamma distribution with x restricted to be positive appropriate for
a positive quantitity obtained from a low-statistics counting experiment,
e.g. a rare decay rate, given an upper limit on x."""
def __init__(self, counts_total, counts_background, limit, confidence_level):
r"""Initialize the distribution.
Parameters:
- counts_total: observed total number (signal and background) of counts.
- counts_background: number of expected background counts, assumed to be
known.
- limit: upper limit on x, which is proportional (with a positive
proportionality factor) to the number of signal events.
- confidence_level: confidence level of the upper limit, i.e. the value
of the CDF at the limit. Float between 0 and 1. Frequently used values
are 0.90 and 0.95.
"""
if confidence_level > 1 or confidence_level < 0:
raise ValueError("Confidence level should be between 0 und 1")
if limit <= 0:
raise ValueError("The upper limit should be a positive number")
if counts_total < 0:
raise ValueError("counts_total should be a positive number or zero")
if counts_background < 0:
raise ValueError("counts_background should be a positive number or zero")
self.limit = limit
self.confidence_level = confidence_level
self.counts_total = counts_total
self.counts_background = counts_background
a, loc, scale = self._get_a_loc_scale()
super().__init__(a=a, loc=loc, scale=scale)
def __repr__(self):
return 'flavio.statistics.probability.GammaUpperLimit' + \
'({}, {}, {}, {})'.format(self.counts_total,
self.counts_background,
self.limit,
self.confidence_level)
def _get_a_loc_scale(self):
"""Convert the counts and limit to the input parameters needed for
GammaDistributionPositive"""
a = self.counts_total + 1
loc_unscaled = -self.counts_background
dist_unscaled = GammaDistributionPositive(a=a, loc=loc_unscaled, scale=1)
limit_unscaled = dist_unscaled.ppf(self.confidence_level)
# rescale
scale = self.limit/limit_unscaled
loc = -self.counts_background*scale
return a, loc, scale
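# Illustrative sketch (not part of the original module): the counts below are
# made up; by construction the CDF at the quoted limit equals the quoted C.L.
def _demo_gamma_upper_limit():
    dist = GammaUpperLimit(counts_total=5, counts_background=2,
                           limit=3e-9, confidence_level=0.9)
    assert np.isclose(float(dist.cdf(3e-9)), 0.9)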
class NumericalDistribution(ProbabilityDistribution):
"""Univariate distribution defined in terms of numerical values for the
PDF."""
def __init__(self, x, y, central_value=None):
"""Initialize a 1D numerical distribution.
Parameters:
- `x`: x-axis values. Must be a 1D array of real values in strictly
ascending order (but not necessarily evenly spaced)
- `y`: PDF values. Must be a 1D array of real positive values with the
same length as `x`
- central_value: if None (default), will be set to the mode of the
distribution, i.e. the x-value where y is largest (by looking up
the input arrays, i.e. without interpolation!)
"""
self.x = x
self.y = y
if central_value is not None:
if x[0] <= central_value <= x[-1]:
super().__init__(central_value=central_value,
support=(x[0], x[-1]))
else:
raise ValueError("Central value must be within range provided")
else:
mode = x[np.argmax(y)]
import numpy as np
from scipy.linalg import qr, solve_triangular, qr_multiply
from CHEBYSHEV.TVB_Method.cheb_class import Polynomial, MultiCheb, TVBError, slice_top, get_var_list, mon_combos, mon_combos_highest, sort_polys_by_degree
"""
This module contains methods for constructing the TvB Matrix associated
to a collection of Chebyshev polynomials
Methods in this module:
telen_van_barel(initial_poly_list): Use the TvB matrix reduction method
to find a vector basis for C[x_1, ..., x_n]/I, where I is the ideal defined by
'initial_poly_list'.
make_basis_dict(matrix, matrix_terms, vector_basis, remainder_shape): Calculate and
returns the basis_dict, which is a mapping of the terms on the diagonal of the
reduced TVB matrix to the terms in the vector basis. It is used to create the
multiplication matrix in root_finder.
clean_zeros_from_matrix(array, accuracy):
find_degree(poly_list): Find the degree needed for a Macaulay/TvB Matrix.
add_polys(degree, poly, poly_coeff_list): Adds polynomials to the Macaulay Matrix.
sorted_matrix_terms(degree, dim): Find the matrix_terms sorted in the term order
needed for telen_van_barel reduction. The highest terms come first, the x,y,z etc
monomials last, the rest in the middle.
create_matrix(poly_coeffs, degree, dim):
Build a Telen Van Barel matrix with specified degree, in specified dimension.
rrqr_reduce_telen_van_barel(matrix, matrix_terms, matrix_shape_stuff): Reduce a
Macaulay matrix in the TvB way--not pivoting the highest and lowest-degree columns.
clean_zeros_from_matrix(array, accuracy): Set all values in the array less than
'accuracy' to 0.
row_swap_matrix(matrix): Rearrange the rows of a matrix so it is closer to upper triangular.
"""
def telen_van_barel(initial_poly_list, accuracy = 1.e-10):
"""Use the Telen-VanBarel matrix reduction method to find a vector basis
for C[x_1, ..., x_n]/I, where I is the ideal defined by initial_poly_list.
Parameters
--------
initial_poly_list: list of Chebyshev polynomials
The polynomials in the system we are solving. These should all be the
same dimension (same number of variables).
accuracy: float
How small a number should be before assuming it is zero.
Returns
-----------
basis_dict : dict
Maps terms on the diagonal of the reduced TVB matrix to the
terms in the vector basis.
vector_basis : numpy array
The terms in the vector basis, each row being a term.
degree : int
The degree of the Macaulay/TvB matrix that was constructed.
"""
dim = initial_poly_list[0].dim #assumes all polys are the same dimension
poly_coeff_list = []
degree = find_degree(initial_poly_list) #find the required degree of the Macaulay matrix
# This sorting is required for fast matrix construction. Ascending should be False.
initial_poly_list = sort_polys_by_degree(initial_poly_list, ascending = False)
# Construct the Macaulay matrix
for i in initial_poly_list:
poly_coeff_list = add_polys(degree, i, poly_coeff_list)
matrix, matrix_terms, matrix_shape_stuff = create_matrix(poly_coeff_list, degree, dim)
# Reduce the matrix to RREF, leaving the top-degree and lowest-degree terms unpivoted
matrix, matrix_terms = rrqr_reduce_telen_van_barel(matrix, matrix_terms, matrix_shape_stuff, accuracy = accuracy)
height = matrix.shape[0] # Number of rows
matrix[:,height:] = solve_triangular(matrix[:,:height],matrix[:,height:])
matrix[:,:height] = np.eye(height)
vector_basis = matrix_terms[height:]
basis_dict = make_basis_dict(matrix, matrix_terms, vector_basis, [degree]*dim)
return basis_dict, vector_basis, degree
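# Hedged usage sketch (illustrative, not from the original module). It assumes
# MultiCheb, imported above, accepts a Chebyshev coefficient ndarray; the exact
# axis/term convention of the coefficients depends on that class.
def _telen_van_barel_example():
    f = MultiCheb(np.array([[0., 1.], [1., 0.]]))   # roughly x + y
    g = MultiCheb(np.array([[-1., 0.], [0., 1.]]))  # roughly x*y - 1
    basis_dict, vector_basis, degree = telen_van_barel([f, g])
    return vector_basis, degree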
def make_basis_dict(matrix, matrix_terms, vector_basis, remainder_shape):
'''Calculates and returns the basis_dict.
This is a dictionary of the terms on the diagonal of the reduced TVB matrix to the terms in the Vector Basis.
It is used to create the multiplication matrix in root_finder.
Parameters
--------
matrix: numpy array
The reduced TVB matrix.
matrix_terms : numpy array
The terms in the matrix. The i'th row is the term represented by the i'th column of the matrix.
vector_basis : numpy array
Each row is a term in the vector basis.
remainder_shape: list
The shape of the numpy arrays that will be mapped to in the basis_dict.
Returns
-----------
basis_dict : dict
Maps terms on the diagonal of the reduced TVB matrix (tuples) to numpy arrays of the shape remainder_shape
that represent the terms reduction into the Vector Basis.
'''
basis_dict = {}
VBSet = set()
for i in vector_basis:
VBSet.add(tuple(i))
spots = list()
for dim in range(vector_basis.shape[1]):
spots.append(vector_basis.T[dim])
for i in range(matrix.shape[0]):
term = tuple(matrix_terms[i])
remainder = np.zeros(remainder_shape)
row = matrix[i]
remainder[spots] = row[matrix.shape[0]:]
basis_dict[term] = remainder
return basis_dict
def find_degree(poly_list):
'''Find the degree needed for a Macaulay Matrix.
Parameters
--------
poly_list: list
The polynomials used to construct the matrix.
Returns
-----------
find_degree : int
The degree of the Macaulay Matrix.
Example:
For polynomials [P1,P2,P3] with degrees [d1,d2,d3], the function returns d1+d2+d3-(number of polynomials)+1
'''
#print('len(poly_list) = {}'.format(len(poly_list)))
degree_needed = 0
#print('initializing degree at {}'.format(degree_needed))
for poly in poly_list:
degree_needed += poly.degree
#print('poly.degree = {}'.format(poly.degree))
#print('degree adjusted to {}'.format(degree_needed))
return ((degree_needed - len(poly_list)) + 1)
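# Worked example of the formula above: three polynomials of degrees 2, 3, and 4
# give a Macaulay degree of (2 + 3 + 4) - 3 + 1 = 7.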
def add_polys(degree, poly, poly_coeff_list):
"""Adds polynomials to a Macaulay Matrix.
This function is called on one polynomial and adds all monomial multiples of it to the matrix.
Parameters
----------
degree : int
The degree of the Macaulay Matrix
poly : Polynomial
One of the polynomials used to make the matrix.
poly_coeff_list : list
A list of all the current polynomials in the matrix.
Returns
-------
poly_coeff_list : list
The original list of polynomials in the matrix with the new
monomial multiplications of poly appended.
"""
poly_coeff_list.append(poly.coeff)
deg = degree - poly.degree
dim = poly.dim
mons = mon_combos([0]*dim,deg)
for i in mons[1:]: #skips the first, all-zero (constant) monomial
poly_coeff_list.append(poly.mon_mult(i, return_type = 'Matrix'))
return poly_coeff_list
def sorted_matrix_terms(degree, dim):
'''Find the matrix_terms sorted in the term order needed for telen_van_barel reduction.
The highest terms come first, the x,y,z etc monomials last, the rest in the middle.
Parameters
----------
degree : int
The degree of the TVB Matrix (the highest degree, as computed by find_degree)
dim : int
The dimension of the polynomials going into the matrix. (dimension = how many variables a polynomial has)
Returns
-------
matrix_terms : numpy array
The sorted matrix_terms.
matrix_term_stuff : tuple
The first entry is the number of 'highest' monomial terms. The second entry
is the number of 'other' terms, those not in the first or third category.
The third entry is the number of monomials of degree one of a single variable,
as well as the monomial 1.
'''
highest_mons = mon_combos_highest([0]*dim,degree)[::-1]
other_mons = list()
d = degree - 1
while d > 1:
other_mons += mon_combos_highest([0]*dim,d)[::-1]
d -= 1
xs_mons = mon_combos([0]*dim,1)[::-1]
sorted_matrix_terms = np.reshape(highest_mons+other_mons+xs_mons, (len(highest_mons+other_mons+xs_mons),dim))
return sorted_matrix_terms, tuple([len(highest_mons),len(other_mons),len(xs_mons)])
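# For example, with dim = 2 and degree = 3 the columns are ordered as: the
# degree-3 monomials first, then the degree-2 monomials, and finally the block
# produced by mon_combos([0, 0], 1) reversed, i.e. the degree-1 monomials
# together with the constant monomial 1.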
def create_matrix(poly_coeffs, degree, dim):
''' Build a Telen Van Barel matrix with specified degree, in specified dimension.
Parameters
----------
poly_coeffs : list of ndarrays
The coefficients of the Chebyshev polynomials from which to build the TvB matrix.
degree : int
The top degree of the polynomials appearing in the TVB Matrix
dim : int
The dimension (number of variables) of all the polynomials appearing in the matrix.
Returns
-------
matrix : 2D numpy array
The Telen Van Barel matrix.
'''
bigShape = [degree+1]*dim
#print('degree = {}, dim = {}, bigShape = {}'.format(degree,dim,bigShape))
matrix_terms, matrix_shape_stuff = sorted_matrix_terms(degree, dim)
#Get the slices needed to pull the matrix_terms from the coeff matrix.
matrix_term_indexes = [row for row in matrix_terms.T]
#Adds the poly_coeffs to flat_polys, using added_zeros to make sure every term is in there.
added_zeros = np.zeros(bigShape)
#print('added_zeros.shape = {}'.format(added_zeros.shape))
flat_polys = list()
for coeff in poly_coeffs:
#print('coeff of poly_coeffs = {}'.format(coeff))
slices = slice_top(coeff)
#print('slices = {}'.format(slices))
added_zeros[slices] = coeff
flat_polys.append(added_zeros[matrix_term_indexes])
added_zeros[slices] = np.zeros_like(coeff)
coeff = 0
poly_coeffs = 0
#Make the matrix. Reshape is faster than stacking.
matrix = np.reshape(flat_polys, (len(flat_polys),len(matrix_terms)))
if matrix_shape_stuff[0] > matrix.shape[0]: #The matrix isn't tall enough, these can't all be pivot columns.
raise TVBError("HIGHEST NOT FULL RANK. TRY HIGHER DEGREE")
#Sort the rows of the matrix so it is close to upper triangular.
matrix = row_swap_matrix(matrix)
return matrix, matrix_terms, matrix_shape_stuff
def rrqr_reduce_telen_van_barel(matrix, matrix_terms, matrix_shape_stuff, accuracy = 1.e-10):
''' Reduces a Macaulay matrix in the TvB way--not pivoting the highest
and lowest-degree columns.
This implementation uses qr_multiply instead of a separate qr call and
matrix multiplication, which makes the function faster and more memory
efficient.
Parameters
----------
matrix : numpy array.
The Macaulay matrix, sorted in TVB style.
matrix_terms: numpy array
Each row of the array contains a term in the matrix. The i'th row corresponds to
the i'th column in the matrix.
matrix_shape_stuff : tuple
Terrible name I know. It has 3 values, the first is how many columns are in the
'highest' part of the matrix. The second is how many are in the 'others' part of
the matrix, and the third is how many are in the 'xs' part.
accuracy : float
What is determined to be 0.
Returns
-------
matrix : numpy array
The reduced matrix.
matrix_terms: numpy array
The resorted matrix_terms.
'''
highest_num = matrix_shape_stuff[0]
others_num = matrix_shape_stuff[1]
xs_num = matrix_shape_stuff[2]
C1,matrix[:highest_num,:highest_num],P1 = qr_multiply(matrix[:,:highest_num], matrix[:,highest_num:].T, mode = 'right', pivoting = True)
matrix[:highest_num,highest_num:] = C1.T
C1 = 0
if abs(matrix[:,:highest_num].diagonal()[-1]) < accuracy:
raise TVBError("HIGHEST NOT FULL RANK")
matrix[:highest_num,highest_num:] = solve_triangular(matrix[:highest_num,:highest_num],matrix[:highest_num,highest_num:])
matrix[:highest_num,:highest_num] = np.eye(highest_num)
matrix[highest_num:,highest_num:] -= (matrix[highest_num:,:highest_num][:,P1])@matrix[:highest_num,highest_num:]
matrix_terms[:highest_num] = matrix_terms[:highest_num][P1]
P1 = 0
C,R,P = qr_multiply(matrix[highest_num:,highest_num:highest_num+others_num], matrix[highest_num:,highest_num+others_num:].T, mode = 'right', pivoting = True)
matrix = matrix[:R.shape[0]+highest_num]
matrix[highest_num:,:highest_num] = np.zeros_like(matrix[highest_num:,:highest_num])
from matplotlib import pyplot as plt
import numpy as np
import h5py
import keras
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.python.keras import backend as K
# Code based on http://everettsprojects.com/2018/01/17/mnist-visualization.html
# pretrained MNIST Model: https://github.com/kj7kunal/MNIST-Keras
tf.compat.v1.disable_eager_execution()
# Set the matplotlib figure size
plt.rc('figure', figsize = (12.0, 12.0))
# Set the learning phase to false, the model is pre-trained.
K.set_learning_phase(False)
model = load_model('MNIST_keras_CNN.h5')
# Figure out what keras named each of the layers in the model
layer_dict = dict([(layer.name, layer) for layer in model.layers])
print(layer_dict.keys())
# A placeholder for the input images
input_img = model.input
# Dimensions of the images
img_width = 28
img_height = 28
# A constant size step function for gradient ascent
def constant_step(total_steps, step, step_size = 1):
return step_size
# Define an initial divisor and decay rate for a varied step function
# This function works better than constant step for the output layer
init_step_divisor = 100
decay = 10
def vary_step(total_steps, step):
return (1.0 / (init_step_divisor + decay * step))
# Function from the Keras blog that normalizes and scales
# a filter before it is rendered as an image
def normalize_image(x):
# Normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + K.epsilon())
x *= 0.1
# Clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# Convert to grayscale image array
x *= 255
if K.image_data_format() == 'channels_first':
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
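# Hedged sketch (assumes a channels-last Keras image_data_format): map a raw
# float activation map to a displayable uint8 grayscale array.
def _normalize_image_example():
    raw = np.random.randn(28, 28, 1)
    out = normalize_image(raw)
    return out.dtype, out.min(), out.max()  # uint8, values within [0, 255]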
# Create a numpy array that represents the image of a filter
# in the passed layer output and loss functions. Based on the
# core parts of <NAME>'s blog post.
def visualize_filter(layer_output, loss, steps = 256, step_fn = constant_step, input_initialization = 'random'):
# Compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, input_img)[0]
# Normalization trick: we normalize the gradient
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
# This function returns the loss and grads given the input picture
iterate = K.function([input_img], [loss, grads])
if K.image_data_format() == 'channels_first':
input_shape = (1, img_width, img_height)
else:
input_shape = (img_width, img_height, 1)
# Initialize the input image. Random works well for the conv layers,
# zeros works better for the output layer.
input_img_data = np.random.random(input_shape) * 255.
if input_initialization == "zeros":
input_img_data = np.zeros(input_shape)
input_img_data = np.array(input_img_data)
import numpy as np
from .base import Transform, Alignment, Invertible
from .rbf import R2LogR2RBF
# Note we inherit from Alignment first to get its n_dims behavior
class ThinPlateSplines(Alignment, Transform, Invertible):
r"""
The thin plate splines (TPS) alignment between 2D `source` and `target`
landmarks.
``kernel`` can be used to specify an alternative kernel function. If
``None`` is supplied, the :class:`R2LogR2RBF` kernel will be used.
Parameters
----------
source : ``(N, 2)`` `ndarray`
The source points to apply the tps from
target : ``(N, 2)`` `ndarray`
The target points to apply the tps to
kernel : :class:`menpo.transform.rbf.RadialBasisFunction`, optional
The kernel to apply.
min_singular_val : `float`, optional
If the target has points that are nearly coincident, the coefficients
matrix is rank deficient, and therefore not invertible. Therefore, we
only take the inverse on the full-rank matrix and drop any singular
values that are less than this value (close to zero).
Raises
------
ValueError
TPS can only be used on 2-dimensional data
"""
def __init__(self, source, target, kernel=None, min_singular_val=1e-4):
Alignment.__init__(self, source, target)
if self.n_dims != 2:
raise ValueError('TPS can only be used on 2D data.')
if kernel is None:
kernel = R2LogR2RBF(source.points)
self.min_singular_val = min_singular_val
self.kernel = kernel
# k[i, j] is the rbf weighting between source i and j
# (of course, k is thus symmetric and its diagonal is zero)
self.k = self.kernel.apply(self.source.points)
# p is a homogeneous version of the source points
self.p = np.concatenate(
[np.ones([self.n_points, 1]), self.source.points], axis=1)
o = np.zeros([3, 3])
top_l = np.concatenate([self.k, self.p], axis=1)
bot_l = np.concatenate([self.p.T, o], axis=1)
self.l = np.concatenate([top_l, bot_l], axis=0)
self.v, self.y, self.coefficients = None, None, None
self._build_coefficients()
def _build_coefficients(self):
self.v = self.target.points.T.copy()
self.y = np.hstack([self.v, np.zeros([2, 3])])
# If two points are coincident, or very close to being so, then the
# matrix is rank deficient and thus not-invertible. Therefore,
# only take the inverse on the full-rank set of indices.
_u, _s, _v = np.linalg.svd(self.l)
__copyright__ = "Copyright (C) 2020-21 University of Illinois Board of Trustees"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from dataclasses import dataclass
import numpy as np
import pytest
from pytools.obj_array import make_obj_array
from arraycontext import (
ArrayContext,
dataclass_array_container, with_container_arithmetic,
serialize_container, deserialize_container,
freeze, thaw,
FirstAxisIsElementsTag,
PyOpenCLArrayContext,
PytatoPyOpenCLArrayContext,
ArrayContainer,)
from arraycontext import ( # noqa: F401
pytest_generate_tests_for_array_contexts,
)
from arraycontext.pytest import (_PytestPyOpenCLArrayContextFactoryWithClass,
_PytestPytatoPyOpenCLArrayContextFactory)
import logging
logger = logging.getLogger(__name__)
# {{{ array context fixture
class _PyOpenCLArrayContextForTests(PyOpenCLArrayContext):
"""Like :class:`PyOpenCLArrayContext`, but applies no program transformations
whatsoever. Only to be used for testing internal to :mod:`arraycontext`.
"""
def transform_loopy_program(self, t_unit):
return t_unit
class _PytatoPyOpenCLArrayContextForTests(PytatoPyOpenCLArrayContext):
"""Like :class:`PytatoPyOpenCLArrayContext`, but applies no program
transformations whatsoever. Only to be used for testing internal to
:mod:`arraycontext`.
"""
def transform_loopy_program(self, t_unit):
return t_unit
class _PyOpenCLArrayContextWithHostScalarsForTestsFactory(
_PytestPyOpenCLArrayContextFactoryWithClass):
actx_class = _PyOpenCLArrayContextForTests
class _PyOpenCLArrayContextForTestsFactory(
_PyOpenCLArrayContextWithHostScalarsForTestsFactory):
force_device_scalars = True
class _PytatoPyOpenCLArrayContextForTestsFactory(
_PytestPytatoPyOpenCLArrayContextFactory):
actx_class = _PytatoPyOpenCLArrayContextForTests
pytest_generate_tests = pytest_generate_tests_for_array_contexts([
_PyOpenCLArrayContextForTestsFactory,
_PyOpenCLArrayContextWithHostScalarsForTestsFactory,
_PytatoPyOpenCLArrayContextForTestsFactory,
])
def _acf():
import pyopencl as cl
context = cl._csc()
queue = cl.CommandQueue(context)
return _PyOpenCLArrayContextForTests(queue, force_device_scalars=True)
# }}}
# {{{ stand-in DOFArray implementation
@with_container_arithmetic(
bcast_obj_array=True,
bcast_numpy_array=True,
bitwise=True,
rel_comparison=True,
_cls_has_array_context_attr=True)
class DOFArray:
def __init__(self, actx, data):
if not (actx is None or isinstance(actx, ArrayContext)):
raise TypeError("actx must be of type ArrayContext")
if not isinstance(data, tuple):
raise TypeError("'data' argument must be a tuple")
self.array_context = actx
self.data = data
__array_priority__ = 10
def __bool__(self):
if len(self) == 1 and self.data[0].size == 1:
return bool(self.data[0])
raise ValueError(
"The truth value of an array with more than one element is "
"ambiguous. Use actx.np.any(x) or actx.np.all(x)")
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def __repr__(self):
return f"DOFArray({repr(self.data)})"
@classmethod
def _serialize_init_arrays_code(cls, instance_name):
return {"_":
(f"{instance_name}_i", f"{instance_name}")}
@classmethod
def _deserialize_init_arrays_code(cls, template_instance_name, args):
(_, arg), = args.items()
# Why tuple([...])? https://stackoverflow.com/a/48592299
return (f"{template_instance_name}.array_context, tuple([{arg}])")
@property
def real(self):
return DOFArray(self.array_context, tuple([subary.real for subary in self]))
@property
def imag(self):
return DOFArray(self.array_context, tuple([subary.imag for subary in self]))
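# Hedged helper sketch (illustrative, mirroring how the tests below construct
# DOFArrays): wrap a host numpy array in a single-group DOFArray.
def _make_dof_array(actx, np_ary):
    return DOFArray(actx, (actx.from_numpy(np_ary),))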
@serialize_container.register(DOFArray)
def _serialize_dof_container(ary: DOFArray):
return enumerate(ary.data)
@deserialize_container.register(DOFArray)
def _deserialize_dof_container(
template, iterable):
def _raise_index_inconsistency(i, stream_i):
raise ValueError(
"out-of-sequence indices supplied in DOFArray deserialization "
f"(expected {i}, received {stream_i})")
return type(template)(
template.array_context,
data=tuple(
v if i == stream_i else _raise_index_inconsistency(i, stream_i)
for i, (stream_i, v) in enumerate(iterable)))
@freeze.register(DOFArray)
def _freeze_dofarray(ary, actx=None):
assert actx is None
return type(ary)(
None,
tuple(ary.array_context.freeze(subary) for subary in ary.data))
@thaw.register(DOFArray)
def _thaw_dofarray(ary, actx):
if ary.array_context is not None:
raise ValueError("cannot thaw DOFArray that already has an array context")
return type(ary)(
actx,
tuple(actx.thaw(subary) for subary in ary.data))
# }}}
# {{{ assert_close_to_numpy*
def randn(shape, dtype):
rng = np.random.default_rng()
dtype = np.dtype(dtype)
if dtype.kind == "c":
dtype = np.dtype(f"<f{dtype.itemsize // 2}")
return rng.standard_normal(shape, dtype) \
+ 1j * rng.standard_normal(shape, dtype)
elif dtype.kind == "f":
return rng.standard_normal(shape, dtype)
elif dtype.kind == "i":
return rng.integers(0, 128, shape, dtype)
else:
raise TypeError(dtype.kind)
def assert_close_to_numpy(actx, op, args):
assert np.allclose(
actx.to_numpy(
op(actx.np, *[
actx.from_numpy(arg) if isinstance(arg, np.ndarray) else arg
for arg in args])),
op(np, *args))
def assert_close_to_numpy_in_containers(actx, op, args):
assert_close_to_numpy(actx, op, args)
ref_result = op(np, *args)
# {{{ test DOFArrays
dofarray_args = [
DOFArray(actx, (actx.from_numpy(arg),))
if isinstance(arg, np.ndarray) else arg
for arg in args]
actx_result = op(actx.np, *dofarray_args)
if isinstance(actx_result, DOFArray):
actx_result = actx_result[0]
assert np.allclose(actx.to_numpy(actx_result), ref_result)
# }}}
# {{{ test object arrays of DOFArrays
obj_array_args = [
make_obj_array([arg]) if isinstance(arg, DOFArray) else arg
for arg in dofarray_args]
obj_array_result = op(actx.np, *obj_array_args)
if isinstance(obj_array_result, np.ndarray):
obj_array_result = obj_array_result[0][0]
assert np.allclose(actx.to_numpy(obj_array_result), ref_result)
# }}}
# }}}
# {{{ np.function same as numpy
@pytest.mark.parametrize(("sym_name", "n_args", "dtype"), [
# float only
("arctan2", 2, np.float64),
("minimum", 2, np.float64),
("maximum", 2, np.float64),
("where", 3, np.float64),
("min", 1, np.float64),
("max", 1, np.float64),
("any", 1, np.float64),
("all", 1, np.float64),
# float + complex
("sin", 1, np.float64),
("sin", 1, np.complex128),
("exp", 1, np.float64),
("exp", 1, np.complex128),
("conj", 1, np.float64),
("conj", 1, np.complex128),
("vdot", 2, np.float64),
("vdot", 2, np.complex128),
("abs", 1, np.float64),
("abs", 1, np.complex128),
("sum", 1, np.float64),
("sum", 1, np.complex64),
])
def test_array_context_np_workalike(actx_factory, sym_name, n_args, dtype):
actx = actx_factory()
if not hasattr(actx.np, sym_name):
pytest.skip(f"'{sym_name}' not implemented on '{type(actx).__name__}'")
ndofs = 512
args = [randn(ndofs, dtype) for i in range(n_args)]
assert_close_to_numpy_in_containers(
actx, lambda _np, *_args: getattr(_np, sym_name)(*_args), args)
@pytest.mark.parametrize(("sym_name", "n_args", "dtype"), [
("zeros_like", 1, np.float64),
("zeros_like", 1, np.complex128),
("ones_like", 1, np.float64),
("ones_like", 1, np.complex128),
])
def test_array_context_np_like(actx_factory, sym_name, n_args, dtype):
actx = actx_factory()
ndofs = 512
args = [randn(ndofs, dtype) for i in range(n_args)]
assert_close_to_numpy(
actx, lambda _np, *_args: getattr(_np, sym_name)(*_args), args)
# }}}
# {{{ array manipulations
def test_actx_stack(actx_factory):
actx = actx_factory()
ndofs = 5000
args = [np.random.randn(ndofs) for i in range(10)]
assert_close_to_numpy_in_containers(
actx, lambda _np, *_args: _np.stack(_args), args)
def test_actx_concatenate(actx_factory):
actx = actx_factory()
ndofs = 5000
args = [np.random.randn(ndofs) for i in range(10)]
assert_close_to_numpy(
actx, lambda _np, *_args: _np.concatenate(_args), args)
def test_actx_reshape(actx_factory):
actx = actx_factory()
for new_shape in [(3, 2), (3, -1), (6,), (-1,)]:
assert_close_to_numpy(
actx, lambda _np, *_args: _np.reshape(*_args),
(np.random.randn(2, 3), new_shape))
def test_actx_ravel(actx_factory):
from numpy.random import default_rng
actx = actx_factory()
rng = default_rng()
ndim = rng.integers(low=1, high=6)
shape = tuple(rng.integers(2, 7, ndim))
assert_close_to_numpy(actx, lambda _np, ary: _np.ravel(ary),
(rng.random(shape),))
# }}}
# {{{ arithmetic same as numpy
def test_dof_array_arithmetic_same_as_numpy(actx_factory):
actx = actx_factory()
ndofs = 50_000
def get_real(ary):
return ary.real
def get_imag(ary):
return ary.imag
import operator
from pytools import generate_nonnegative_integer_tuples_below as gnitb
from random import uniform, randrange
for op_func, n_args, use_integers in [
(operator.add, 2, False),
(operator.sub, 2, False),
(operator.mul, 2, False),
(operator.truediv, 2, False),
(operator.pow, 2, False),
# FIXME pyopencl.Array doesn't do mod.
#(operator.mod, 2, True),
#(operator.mod, 2, False),
#(operator.imod, 2, True),
#(operator.imod, 2, False),
# FIXME: Two outputs
#(divmod, 2, False),
(operator.iadd, 2, False),
(operator.isub, 2, False),
(operator.imul, 2, False),
(operator.itruediv, 2, False),
(operator.and_, 2, True),
(operator.xor, 2, True),
(operator.or_, 2, True),
(operator.iand, 2, True),
(operator.ixor, 2, True),
(operator.ior, 2, True),
(operator.ge, 2, False),
(operator.lt, 2, False),
(operator.gt, 2, False),
(operator.eq, 2, True),
(operator.ne, 2, True),
(operator.pos, 1, False),
(operator.neg, 1, False),
(operator.abs, 1, False),
(get_real, 1, False),
(get_imag, 1, False),
]:
for is_array_flags in gnitb(2, n_args):
if sum(is_array_flags) == 0:
# all scalars, no need to test
continue
if is_array_flags[0] == 0 and op_func in [
operator.iadd, operator.isub,
operator.imul, operator.itruediv,
operator.iand, operator.ixor, operator.ior,
]:
# can't do in place operations with a scalar lhs
continue
if op_func == operator.ge:
op_func_actx = actx.np.greater_equal
elif op_func == operator.lt:
op_func_actx = actx.np.less
elif op_func == operator.gt:
op_func_actx = actx.np.greater
elif op_func == operator.eq:
op_func_actx = actx.np.equal
elif op_func == operator.ne:
op_func_actx = actx.np.not_equal
else:
op_func_actx = op_func
args = [
(0.5+np.random.rand(ndofs)
if not use_integers else
np.random.randint(3, 200, ndofs))
if is_array_flag else
(uniform(0.5, 2)
if not use_integers
else randrange(3, 200))
for is_array_flag in is_array_flags]
# {{{ get reference numpy result
# make a copy for the in place operators
ref_args = [
arg.copy() if isinstance(arg, np.ndarray) else arg
for arg in args]
ref_result = op_func(*ref_args)
# }}}
# {{{ test DOFArrays
actx_args = [
DOFArray(actx, (actx.from_numpy(arg),))
if isinstance(arg, np.ndarray) else arg
for arg in args]
actx_result = actx.to_numpy(op_func_actx(*actx_args)[0])
assert np.allclose(actx_result, ref_result)
# }}}
# {{{ test object arrays of DOFArrays
# It would be very nice if comparisons on object arrays behaved
# consistently with everything else. Alas, they do not. Instead:
#
# 0.5 < obj_array(DOFArray) -> obj_array([True])
#
# because hey, 0.5 < DOFArray returned something truthy.
if op_func not in [
operator.eq, operator.ne,
operator.le, operator.lt,
operator.ge, operator.gt,
operator.iadd, operator.isub,
operator.imul, operator.itruediv,
operator.iand, operator.ixor, operator.ior,
# All Python objects are real-valued, right?
get_imag,
]:
obj_array_args = [
make_obj_array([arg]) if isinstance(arg, DOFArray) else arg
for arg in actx_args]
obj_array_result = actx.to_numpy(
op_func_actx(*obj_array_args)[0][0])
assert np.allclose(obj_array_result, ref_result)
# }}}
# }}}
# {{{ reductions same as numpy
@pytest.mark.parametrize("op", ["sum", "min", "max"])
def test_reductions_same_as_numpy(actx_factory, op):
actx = actx_factory()
ary = np.random.randn(3000)
np_red = getattr(np, op)(ary)
actx_red = getattr(actx.np, op)(actx.from_numpy(ary))
actx_red = actx.to_numpy(actx_red)
from numbers import Number
if isinstance(actx, PyOpenCLArrayContext) and (not actx._force_device_scalars):
assert isinstance(actx_red, Number)
else:
assert actx_red.shape == ()
assert np.allclose(np_red, actx_red)
@pytest.mark.parametrize("sym_name", ["any", "all"])
def test_any_all_same_as_numpy(actx_factory, sym_name):
actx = actx_factory()
if not hasattr(actx.np, sym_name):
pytest.skip(f"'{sym_name}' not implemented on '{type(actx).__name__}'")
rng = np.random.default_rng()
ary_any = rng.integers(0, 2, 512)
ary_all = np.ones(512)
assert_close_to_numpy_in_containers(actx,
lambda _np, *_args: getattr(_np, sym_name)(*_args), [ary_any])
assert_close_to_numpy_in_containers(actx,
lambda _np, *_args: getattr(_np, sym_name)(*_args), [ary_all])
assert_close_to_numpy_in_containers(actx,
lambda _np, *_args: getattr(_np, sym_name)(*_args), [1 - ary_all])
def test_array_equal_same_as_numpy(actx_factory):
actx = actx_factory()
sym_name = "array_equal"
if not hasattr(actx.np, sym_name):
pytest.skip(f"'{sym_name}' not implemented on '{type(actx).__name__}'")
rng = np.random.default_rng()
ary = rng.integers(0, 2, 512)
ary_copy = ary.copy()
ary_diff_values = np.ones(512)
ary_diff_shape = np.ones(511)
ary_diff_type = DOFArray(actx, (np.ones(512),))
# Equal
assert_close_to_numpy_in_containers(actx,
lambda _np, *_args: getattr(_np, sym_name)(*_args), [ary, ary_copy])
# Different values
assert_close_to_numpy_in_containers(actx,
lambda _np, *_args: getattr(_np, sym_name)(*_args), [ary, ary_diff_values])
# Different shapes
assert_close_to_numpy_in_containers(actx,
lambda _np, *_args: getattr(_np, sym_name)(*_args), [ary, ary_diff_shape])
# Different types
assert not actx.to_numpy(actx.np.array_equal(ary, ary_diff_type))
# }}}
# {{{ test array context einsum
@pytest.mark.parametrize("spec", [
"ij->ij",
"ij->ji",
"ii->i",
])
def test_array_context_einsum_array_manipulation(actx_factory, spec):
actx = actx_factory()
mat = actx.from_numpy(np.random.randn(10, 10))
res = actx.to_numpy(actx.einsum(spec, mat,
tagged=(FirstAxisIsElementsTag())))
ans = np.einsum(spec, actx.to_numpy(mat))
assert np.allclose(res, ans)
@pytest.mark.parametrize("spec", [
"ij,ij->ij",
"ij,ji->ij",
"ij,kj->ik",
])
def test_array_context_einsum_array_matmatprods(actx_factory, spec):
actx = actx_factory()
mat_a = actx.from_numpy(np.random.randn(5, 5))
mat_b = actx.from_numpy(np.random.randn(5, 5))
res = actx.to_numpy(actx.einsum(spec, mat_a, mat_b,
tagged=(FirstAxisIsElementsTag())))
ans = np.einsum(spec, actx.to_numpy(mat_a), actx.to_numpy(mat_b))
assert np.allclose(res, ans)
@pytest.mark.parametrize("spec", [
"im,mj,k->ijk"
])
def test_array_context_einsum_array_tripleprod(actx_factory, spec):
actx = actx_factory()
mat_a = actx.from_numpy(np.random.randn(7, 5))
mat_b = actx.from_numpy(np.random.randn(5, 7))
vec = actx.from_numpy(np.random.randn(7))
res = actx.to_numpy(actx.einsum(spec, mat_a, mat_b, vec,
tagged=(FirstAxisIsElementsTag())))
ans = np.einsum(spec,
actx.to_numpy(mat_a),
actx.to_numpy(mat_b),
actx.to_numpy(vec))
assert np.allclose(res, ans)
# }}}
# {{{ array container classes for test
@with_container_arithmetic(bcast_obj_array=False,
eq_comparison=False, rel_comparison=False)
@dataclass_array_container
@dataclass(frozen=True)
class MyContainer:
name: str
mass: DOFArray
momentum: np.ndarray
enthalpy: DOFArray
@property
def array_context(self):
return self.mass.array_context
@with_container_arithmetic(
bcast_obj_array=False,
bcast_container_types=(DOFArray, np.ndarray),
matmul=True,
rel_comparison=True,)
@dataclass_array_container
@dataclass(frozen=True)
class MyContainerDOFBcast:
name: str
mass: DOFArray
momentum: np.ndarray
enthalpy: DOFArray
@property
def array_context(self):
return self.mass.array_context
def _get_test_containers(actx, ambient_dim=2, size=50_000):
if size == 0:
x = DOFArray(actx, (actx.from_numpy(np.array(np.random.randn())),))
else:
x = DOFArray(actx, (actx.from_numpy(np.random.randn(size)),))
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
dataclass_of_dofs = MyContainer(
name="container",
mass=x,
momentum=make_obj_array([x] * ambient_dim),
enthalpy=x)
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
bcast_dataclass_of_dofs = MyContainerDOFBcast(
name="container",
mass=x,
momentum=make_obj_array([x] * ambient_dim),
enthalpy=x)
ary_dof = x
ary_of_dofs = make_obj_array([x] * ambient_dim)
mat_of_dofs = np.empty((ambient_dim, ambient_dim), dtype=object)
for i in np.ndindex(mat_of_dofs.shape):
mat_of_dofs[i] = x
return (ary_dof, ary_of_dofs, mat_of_dofs, dataclass_of_dofs,
bcast_dataclass_of_dofs)
def test_container_scalar_map(actx_factory):
actx = actx_factory()
arys = _get_test_containers(actx, size=0)
arys += (np.pi,)
from arraycontext import (
map_array_container, rec_map_array_container,
map_reduce_array_container, rec_map_reduce_array_container,
)
for ary in arys:
result = map_array_container(lambda x: x, ary)
assert result is not None
result = rec_map_array_container(lambda x: x, ary)
assert result is not None
result = map_reduce_array_container(np.shape, lambda x: x, ary)
assert result is not None
result = rec_map_reduce_array_container(np.shape, lambda x: x, ary)
assert result is not None
def test_container_multimap(actx_factory):
actx = actx_factory()
ary_dof, ary_of_dofs, mat_of_dofs, dc_of_dofs, bcast_dc_of_dofs = \
_get_test_containers(actx)
# {{{ check
def _check_allclose(f, arg1, arg2, atol=2.0e-14):
assert np.linalg.norm(actx.to_numpy(f(arg1) - arg2)) < atol
def func_all_scalar(x, y):
return x + y
def func_first_scalar(x, subary):
return x + subary
def func_multiple_scalar(a, subary1, b, subary2):
return a * subary1 + b * subary2
from arraycontext import rec_multimap_array_container
result = rec_multimap_array_container(func_all_scalar, 1, 2)
assert result == 3
from functools import partial
for ary in [ary_dof, ary_of_dofs, mat_of_dofs, dc_of_dofs]:
result = rec_multimap_array_container(func_first_scalar, 1, ary)
rec_multimap_array_container(
partial(_check_allclose, lambda x: 1 + x),
ary, result)
result = rec_multimap_array_container(func_multiple_scalar, 2, ary, 2, ary)
rec_multimap_array_container(
partial(_check_allclose, lambda x: 4 * x),
ary, result)
with pytest.raises(AssertionError):
rec_multimap_array_container(func_multiple_scalar, 2, ary_dof, 2, dc_of_dofs)
# }}}
def test_container_arithmetic(actx_factory):
actx = actx_factory()
ary_dof, ary_of_dofs, mat_of_dofs, dc_of_dofs, bcast_dc_of_dofs = \
_get_test_containers(actx)
# {{{ check
def _check_allclose(f, arg1, arg2, atol=5.0e-14):
assert np.linalg.norm(actx.to_numpy(f(arg1) - arg2)) < atol
from functools import partial
from arraycontext import rec_multimap_array_container
for ary in [ary_dof, ary_of_dofs, mat_of_dofs, dc_of_dofs]:
rec_multimap_array_container(
partial(_check_allclose, lambda x: 3 * x),
ary, 2 * ary + ary)
rec_multimap_array_container(
partial(_check_allclose, lambda x: actx.np.sin(x)),
ary, actx.np.sin(ary))
with pytest.raises(TypeError):
ary_of_dofs + dc_of_dofs
with pytest.raises(TypeError):
dc_of_dofs + ary_of_dofs
with pytest.raises(TypeError):
ary_dof + dc_of_dofs
with pytest.raises(TypeError):
dc_of_dofs + ary_dof
bcast_result = ary_dof + bcast_dc_of_dofs
bcast_dc_of_dofs + ary_dof
assert actx.to_numpy(actx.np.linalg.norm(bcast_result.mass
- 2*ary_of_dofs)) < 1e-8
mock_gradient = MyContainerDOFBcast(
name="yo",
mass=ary_of_dofs,
momentum=mat_of_dofs,
enthalpy=ary_of_dofs)
grad_matvec_result = mock_gradient @ ary_of_dofs
assert isinstance(grad_matvec_result.mass, DOFArray)
assert grad_matvec_result.momentum.shape == ary_of_dofs.shape
assert actx.to_numpy(actx.np.linalg.norm(
grad_matvec_result.mass - sum(ary_of_dofs**2)
)) < 1e-8
# }}}
def test_container_freeze_thaw(actx_factory):
actx = actx_factory()
ary_dof, ary_of_dofs, mat_of_dofs, dc_of_dofs, bcast_dc_of_dofs = \
_get_test_containers(actx)
# {{{ check
from arraycontext import get_container_context
from arraycontext import get_container_context_recursively
assert get_container_context(ary_of_dofs) is None
assert get_container_context(mat_of_dofs) is None
assert get_container_context(ary_dof) is actx
assert get_container_context(dc_of_dofs) is actx
assert get_container_context_recursively(ary_of_dofs) is actx
assert get_container_context_recursively(mat_of_dofs) is actx
for ary in [ary_dof, ary_of_dofs, mat_of_dofs, dc_of_dofs]:
frozen_ary = freeze(ary)
thawed_ary = thaw(frozen_ary, actx)
frozen_ary = freeze(thawed_ary)
assert get_container_context_recursively(frozen_ary) is None
assert get_container_context_recursively(thawed_ary) is actx
actx2 = actx.clone()
ary_dof_frozen = freeze(ary_dof)
with pytest.raises(ValueError) as exc_info:
ary_dof + ary_dof_frozen
assert "frozen" in str(exc_info.value)
ary_dof_2 = thaw(freeze(ary_dof), actx2)
with pytest.raises(ValueError):
ary_dof + ary_dof_2
# }}}
@pytest.mark.parametrize("ord", [2, np.inf])
def test_container_norm(actx_factory, ord):
actx = actx_factory()
from pytools.obj_array import make_obj_array
c = MyContainer(name="hey", mass=1, momentum=make_obj_array([2, 3]), enthalpy=5)
n1 = actx.np.linalg.norm(make_obj_array([c, c]), ord)
n2 = np.linalg.norm([1, 2, 3, 5]*2, ord)
assert abs(n1 - n2) < 1e-12
# }}}
# {{{ test from_numpy and to_numpy
def test_numpy_conversion(actx_factory):
actx = actx_factory()
ac = MyContainer(
name="test_numpy_conversion",
mass=np.random.rand(42),
momentum=make_obj_array([np.random.rand(42) for _ in range(3)]),
enthalpy=np.random.rand(42),
)
from arraycontext import from_numpy, to_numpy
ac_actx = from_numpy(ac, actx)
ac_roundtrip = to_numpy(ac_actx, actx)
assert np.allclose(ac.mass, ac_roundtrip.mass)
assert np.allclose(ac.momentum[0], ac_roundtrip.momentum[0])
from dataclasses import replace
ac_with_cl = replace(ac, enthalpy=ac_actx.mass)
with pytest.raises(TypeError):
from_numpy(ac_with_cl, actx)
with pytest.raises(TypeError):
from_numpy(ac_actx, actx)
with pytest.raises(ValueError):
to_numpy(ac, actx)
# }}}
# {{{ test actx.np.linalg.norm
@pytest.mark.parametrize("norm_ord", [2, np.inf])
def test_norm_complex(actx_factory, norm_ord):
actx = actx_factory()
a = randn(2000, np.complex128)
norm_a_ref = np.linalg.norm(a, norm_ord)
norm_a = actx.np.linalg.norm(actx.from_numpy(a), norm_ord)
norm_a = actx.to_numpy(norm_a)
assert abs(norm_a_ref - norm_a)/norm_a < 1e-13
@pytest.mark.parametrize("ndim", [1, 2, 3, 4, 5])
def test_norm_ord_none(actx_factory, ndim):
actx = actx_factory()
from numpy.random import default_rng
rng = default_rng()
# -*- coding: utf-8 -*-
#==========================================
# Title: BaseBO.py
# Author: <NAME> and <NAME>
# Date: 20 August 2019
# Link: https://arxiv.org/abs/1906.08878
#==========================================
import os
import pickle
import random
import numpy as np
MAX_RANDOM_SEED = 2 ** 31 - 1
class BaseBO():
"""
Base class with common operations for BO with continuous and categorical
inputs
"""
def __init__(self, objfn, initN, bounds, C, rand_seed=108, debug=False,
batch_size=1, **kwargs):
self.f = objfn # function to optimise
self.bounds = bounds # function bounds
self.batch_size = batch_size
self.C = C # no of categories
self.initN = initN # no: of initial points
self.nDim = len(self.bounds) # dimension
self.rand_seed = rand_seed
self.debug = debug
self.saving_path = None
self.kwargs = kwargs
self.x_bounds = np.vstack([d['domain'] for d in self.bounds
if d['type'] == 'continuous'])
def initialise(self, seed):
"""Get NxN intial points"""
data = []
result = []
print(f"Creating init data for seed {seed}")
initial_data_x = np.zeros((self.initN, self.nDim))
"""
:Author: <NAME> <<EMAIL>>
Module implementing non-parametric regressions using kernel smoothing methods.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy
from scipy import stats
from scipy.linalg import sqrtm, solve
from .compat import irange
from .cyth import HAS_CYTHON
local_linear = None
def useCython():
"""
Switch to using Cython methods if available
"""
global local_linear
if HAS_CYTHON:
from . import cy_local_linear
local_linear = cy_local_linear
def usePython():
"""
Switch to using the python implementation of the methods
"""
global local_linear
from . import py_local_linear
local_linear = py_local_linear
if HAS_CYTHON:
useCython()
else:
usePython()
from .kde import scotts_covariance
from .kernels import normal_kernel, normal_kernel1d
class SpatialAverage(object):
r"""
Perform a Nadaraya-Watson regression on the data (i.e. also called
local-constant regression) using a gaussian kernel.
The Nadaraya-Watson estimate is given by:
.. math::
f_n(x) \triangleq \frac{\sum_i K\left(\frac{x-X_i}{h}\right) Y_i}
{\sum_i K\left(\frac{x-X_i}{h}\right)}
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:type cov: ndarray or callable
:param cov: If an ndarray, it should be a 2D array giving the matrix of
covariance of the gaussian kernel. Otherwise, it should be a function
``cov(xdata, ydata)`` returning the covariance matrix.
"""
def __init__(self, xdata, ydata, cov=scotts_covariance):
self.xdata = np.atleast_2d(xdata)
self.ydata = np.atleast_1d(ydata)
self._bw = None
self._covariance = None
self._inv_cov = None
self.covariance = cov
self.d, self.n = self.xdata.shape
self.correction = 1.
@property
def bandwidth(self):
"""
Bandwidth of the kernel. It cannot be set directly, but rather should
be set via the covariance attribute.
"""
if self._bw is None and self._covariance is not None:
self._bw = np.real(sqrtm(self._covariance))
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a 2D matrix for the covariance of the kernel.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = np.atleast_2d(cov(self.xdata, self.ydata))
else:
_cov = np.atleast_2d(cov)
self._bw = None
self._covariance = _cov
self._inv_cov = scipy.linalg.inv(_cov)
def evaluate(self, points, result=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put in this
array
"""
points = np.atleast_2d(points).astype(self.xdata.dtype)
#norm = self.kde(points)
d, m = points.shape
if result is None:
result = np.zeros((m,), points.dtype)
norm = np.zeros((m,), points.dtype)
# iterate on the internal points
for i, ci in np.broadcast(irange(self.n),
irange(self._correction.shape[0])):
diff = np.dot(self._correction[ci],
self.xdata[:, i, np.newaxis] - points)
tdiff = np.dot(self._inv_cov, diff)
energy = np.exp(-np.sum(diff * tdiff, axis=0) / 2.0)
result += self.ydata[i] * energy
norm += energy
result[norm > 0] /= norm[norm > 0]
return result
def __call__(self, *args, **kwords):
"""
This method is an alias for :py:meth:`SpatialAverage.evaluate`
"""
return self.evaluate(*args, **kwords)
@property
def correction(self):
"""
The correction coefficient allows to change the width of the kernel
depending on the point considered. It can be either a constant (to
correct globaly the kernel width), or a 1D array of same size as the
input.
"""
return self._correction
@correction.setter # noqa
def correction(self, value):
self._correction = np.atleast_1d(value)
def set_density_correction(self):
"""
Add a correction coefficient depending on the density of the input
"""
kde = stats.gaussian_kde(self.xdata)
dens = kde(self.xdata)
dm = dens.max()
dens[dens < 1e-50] = dm
self._correction = dm / dens
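# Hedged usage sketch (illustrative, not part of the module): smooth noisy 1D
# observations with the Nadaraya-Watson estimate defined above.
def _spatial_average_example():
    xs = np.linspace(0, 2 * np.pi, 200)
    ys = np.sin(xs) + 0.1 * np.random.randn(200)
    estimator = SpatialAverage(xs, ys)
    grid = np.linspace(0, 2 * np.pi, 50)
    return estimator(grid)  # smoothed values on the evaluation grid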
class LocalLinearKernel1D(object):
r"""
Perform a local-linear regression using a gaussian kernel.
The local constant regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i)\right)^2
Where :math:`K(x)` is the kernel and must be such that :math:`E(K(x)) = 0`
and :math:`h` is the bandwidth of the method.
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:type cov: float or callable
:param cov: If an float, it should be a variance of the gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning the
variance.
"""
def __init__(self, xdata, ydata, cov=scotts_covariance):
self.xdata = np.atleast_1d(xdata)
self.ydata = np.atleast_1d(ydata)
self.n = self.xdata.shape[0]
self._bw = None
self._covariance = None
self.covariance = cov
@property
def bandwidth(self):
"""
Bandwidth of the kernel.
"""
return self._bw
@property
def covariance(self):
"""
Covariance of the gaussian kernel.
Can be set either as a fixed value or using a bandwidth calculator,
that is a function of signature ``w(xdata, ydata)`` that returns
a single value.
.. note::
A ndarray with a single value will be converted to a floating
point value.
"""
return self._covariance
@covariance.setter # noqa
def covariance(self, cov):
if callable(cov):
_cov = float(cov(self.xdata, self.ydata))
else:
_cov = float(cov)
self._covariance = _cov
self._bw = np.sqrt(_cov)
def evaluate(self, points, out=None):
"""
Evaluate the spatial averaging on a set of points
:param ndarray points: Points to evaluate the averaging on
:param ndarray result: If provided, the result will be put in this
array
"""
li2, out = local_linear.local_linear_1d(self._bw, self.xdata,
self.ydata, points, out)
self.li2 = li2
return out
def __call__(self, *args, **kwords):
"""
This method is an alias for :py:meth:`LocalLinearKernel1D.evaluate`
"""
return self.evaluate(*args, **kwords)
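# Hedged usage sketch (illustrative): fit a local-linear regression and
# evaluate it on a regular grid.
def _local_linear_example():
    xs = np.sort(np.random.rand(100))
    ys = xs ** 2 + 0.05 * np.random.randn(100)
    regression = LocalLinearKernel1D(xs, ys)
    return regression(np.linspace(0, 1, 25))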
class PolynomialDesignMatrix1D(object):
def __init__(self, dim):
self.dim = dim
powers = np.arange(0, dim + 1).reshape((1, dim + 1))
self.powers = powers
def __call__(self, dX, out=None):
return np.power(dX, self.powers, out) # / self.frac
class LocalPolynomialKernel1D(object):
r"""
Perform a local-polynomial regression using a user-provided kernel
(Gaussian by default).
The local constant regression is the function that minimises, for each
position:
.. math::
f_n(x) \triangleq \argmin_{a_0\in\mathbb{R}}
\sum_i K\left(\frac{x-X_i}{h}\right)
\left(Y_i - a_0 - a_1(x-X_i) - \ldots -
a_q \frac{(x-X_i)^q}{q!}\right)^2
Where :math:`K(x)` is the kernel such that :math:`E(K(x)) = 0`, :math:`q`
is the order of the fitted polynomial and :math:`h` is the bandwidth of
the method. It is also recommended to have :math:`\int_\mathbb{R} x^2K(x)dx
= 1`, (i.e. variance of the kernel is 1) or the effective bandwidth will be
scaled by the square-root of this integral (i.e. the standard deviation of
the kernel).
:param ndarray xdata: Explaining variables (at most 2D array)
:param ndarray ydata: Explained variables (should be 1D array)
:param int q: Order of the polynomial to fit. **Default:** 3
:type cov: float or callable
:param cov: If an float, it should be a variance of the gaussian kernel.
Otherwise, it should be a function ``cov(xdata, ydata)`` returning
the variance.
**Default:** ``scotts_covariance``
"""
def __init__(self, xdata, ydata, q=3, **kwords):
self.xdata = np.atleast_1d(xdata)
"""
The MIT License (MIT)
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Provided license texts might have their own copyrights and restrictions
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from PIL import Image, ImageDraw
import cv2
import numpy as np
import matplotlib.pyplot as plt
import PIL
import torch
import torchvision.transforms.functional as F
import torch
from .utils import deprecated
def combine_images(images: list, axis=1):
"""Combine images
Args:
images (list): image list (must have the same dimension)
axis (int): merge direction
When axis = 0, the images are merged vertically;
When axis = 1, the images are merged horizontally.
Returns:
merge image
"""
ndim = images[0].ndim
shapes = np.array([mat.shape for mat in images])
assert all(img.ndim == ndim for img in images), \
'all images should have the same ndim.'
if axis == 0: # merge images vertically
# Merge image cols
cols = np.max(shapes[:, 1])
# Expand the cols size of each image to make the cols consistent
copy_imgs = [cv2.copyMakeBorder(img, 0, 0, 0, cols - img.shape[1],
cv2.BORDER_CONSTANT, (0, 0, 0)) for img in images]
# Merge vertically
return np.vstack(copy_imgs)
else: # merge images horizontally
# Combine the rows of the image
rows = np.max(shapes[:, 0])
# Expand the row size of each image to make rows consistent
copy_imgs = [cv2.copyMakeBorder(img, 0, rows - img.shape[0], 0, 0,
cv2.BORDER_CONSTANT, (0, 0, 0)) for img in images]
# Merge horizontally
return np.hstack(copy_imgs)
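# Hedged usage sketch (illustrative shapes): merging horizontally pads both
# images to the taller height, so the result below has shape (15, 25, 3).
def _combine_images_example():
    a = np.zeros((10, 20, 3), dtype=np.uint8)
    b = np.full((15, 5, 3), 255, dtype=np.uint8)
    return combine_images([a, b], axis=1)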
def convert_to_torch_image(img):
""" Convert the image to a torch image.
Code:
img = torch.tensor(img)
img = img.permute((2, 0, 1)).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
return img
"""
return F.to_tensor(img)
def read_image(file_path, to_rgb=True, use_cv2=False):
if use_cv2:
img = cv2.imread(file_path)
if to_rgb:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
else:
img = Image.open(file_path)
img = img.convert('RGB')
return img
@deprecated('Only for specific experiments')
def read_image_experiments(file_path: str, to_rgb=True, vis=False, to_tensor=False):
"""Load and convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/master/references/segmentation
"""
# 'vis' is kept for backward compatibility but is unused here.
img = read_image(file_path, to_rgb=to_rgb)
if not to_tensor:
return img
img_tensor = convert_to_torch_image(img)
return {'img_raw': img,
'img_tensor': img_tensor
}
def padding_image(img_arr):
w, h, c = img_arr.shape
if w > h:
padd = (w - h)//2
img_arr = cv2.copyMakeBorder(
img_arr.copy(), 10, 10, padd, padd, cv2.BORDER_CONSTANT, value=0)
elif w < h:
padd = (h - w)//2
img_arr = cv2.copyMakeBorder(
img_arr.copy(), padd, padd, 10, 10, cv2.BORDER_CONSTANT, value=0)
return img_arr
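# Hedged sketch (illustrative shape): the shorter spatial side is padded to
# match the longer one, plus a 10-pixel border on the longer side, so a
# (100, 60, 3) input comes back as (120, 100, 3).
def _padding_image_example():
    tall = np.zeros((100, 60, 3), dtype=np.uint8)
    return padding_image(tall).shape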
def _get_type(img):
if isinstance(img, Image.Image):
return 'pil'
elif isinstance(img, np.ndarray):
return 'np'
else:
raise ValueError('Unknown type!')
def _is_ndarray(inp):
assert isinstance(inp, np.ndarray)
def _is_list(inp):
if isinstance(inp, np.ndarray):
assert len(inp.shape) == 1
else:
assert isinstance(inp, list)
def square_box(img, box):
_is_list(box)
if isinstance(img, PIL.Image.Image):
w, h = img.size
elif isinstance(img, np.ndarray):
h, w, c = img.shape
else:
raise TypeError('img is not valid. Expect `ndarray` or `PIL`')
x, y, xx, yy = box
_cx = (xx + x)/2
_delta_y = (yy - y)/2
x = int(np.max([0, _cx - _delta_y]))
xx = int(np.min([w, _cx + _delta_y]))
return [x, y, xx, yy]
from collections import defaultdict
from scipy.special import expit
import numpy as np
import pandas as pd
import torch
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
def average(vals):
return sum(vals) / len(vals)
def std(vals, mu):
var = sum([((x - mu) ** 2) for x in vals]) / len(vals)
return var ** .5
# This function creates the dictionary of predictions
# for a specific state given an array of state predictions
# for a given day with an arbitrary number of simulations
# to determine that day
def createPredictionsDict(vals):
preds = defaultdict(list)
for i in range(len(vals)):
temp = vals[i]
for j in range(len(temp)):
preds[j].append(temp[j])
return preds
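# Hedged sketch (illustrative values): two simulations over three entries are
# regrouped into per-index lists across simulations.
def _create_predictions_dict_example():
    sims = [[0.48, 0.52, 0.51], [0.50, 0.49, 0.53]]
    return createPredictionsDict(sims)
    # -> {0: [0.48, 0.50], 1: [0.52, 0.49], 2: [0.51, 0.53]}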
# This calculates various details needed for plotting
# including number of wins for Trump vs Clinton
# and the electoral vote splits
def electoralVoteCalculator(numDays, numStates, preds, EV_Index, EV):
EV_list_trump = list()
EV_list_clinton = list()
clintonWins = 0
trumpWins = 0
for i in range(numDays):
evTotal_clinton = 0
evTotal_trump = 0
for j in range(numStates):
pct = preds[j][i]
if pct >= 0.5:
evTotal_clinton += EV[EV_Index[j]]
else:
evTotal_trump += EV[EV_Index[j]]
evTotal_clinton = int(evTotal_clinton)
evTotal_trump = int(evTotal_trump)
if evTotal_clinton > evTotal_trump:
clintonWins += 1
else:
trumpWins += 1
EV_list_clinton.append(evTotal_clinton)
EV_list_trump.append(evTotal_trump)
return clintonWins, trumpWins, EV_list_trump, EV_list_clinton
def mean_low_high(draws, states, Id):
mean = expit(draws.mean(axis=0))
high = expit(draws.mean(axis=0) + 1.96 * draws.std(axis=0))
low = expit(draws.mean(axis=0) - 1.96 * draws.std(axis=0))
Id = [Id] * len(states)
draws_df = {'states': states, 'mean': mean, 'high': high,
'low': low, 'type': Id}
draws_df = pd.DataFrame(draws_df)
return draws_df
def function_tibble(x, predicted):
temp = predicted[:, :, x]
low = np.quantile(predicted[:, :, x], 0.025, axis=0)
high = np.quantile(predicted[:, :, x], 0.975, axis=0)
mean = torch.mean(predicted[:, :, x], axis=0)
prob = (predicted[:, :, x] > 0.5).type(torch.float).mean(axis=0)
state = [x] * temp.shape[1]
t = np.arange(temp.shape[1])
import numpy as np
import pandas as pd
from ..stats._utils import corr, scale
from .ordi_plot import ordiplot, screeplot
class RedundancyAnalysis():
r"""Compute redundancy analysis, a type of canonical analysis.
Redundancy analysis (RDA) is a principal component analysis on predicted
values :math:`\hat{Y}` obtained by fitting response variables :math:`Y` with
explanatory variables :math:`X` using a multiple regression.
RDA is typically used when one wants to quantify how much of the variation
in a set of response variables (e.g. species abundances) can be explained
by a set of explanatory variables (e.g. environmental measurements).
Parameters
----------
y : pd.DataFrame
:math:`n \times p` response matrix, where :math:`n` is the number
of samples and :math:`p` is the number of features. Its columns
need to be dimensionally homogeneous (or you can set `scale_Y=True`).
This matrix is also referred to as the community matrix that
commonly stores information about species abundances
x : pd.DataFrame
:math:`n \times m, n \geq m` matrix of explanatory
variables, where :math:`n` is the number of samples and
:math:`m` is the number of metadata variables. Its columns
need not be standardized, but doing so turns regression
coefficients into standard regression coefficients.
scale_Y : bool, optional
Controls whether the response matrix columns are scaled to
have unit standard deviation. Defaults to `False`.
scaling : int
Scaling type 1 (scaling=1) produces a distance biplot. It focuses on
the ordination of rows (samples) because their transformed
distances approximate their original euclidean
distances. Especially interesting when most explanatory
variables are binary.
Scaling type 2 produces a correlation biplot. It focuses
on the relationships among explained variables (`y`). It
is interpreted like scaling type 1, but taking into
account that distances between objects don't approximate
their euclidean distances.
See more details about distance and correlation biplots in
[1]_, \S 9.1.4.
sample_scores_type : str
Type of sample score to output, either 'lc' and 'wa'.
Returns
-------
Ordination object, ordination plot, screeplot
See Also
--------
ca
cca
Notes
-----
The algorithm is based on [1]_, \S 11.1.
References
----------
.. [1] <NAME>. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
def __init__(self, scale_Y=True, scaling=1, sample_scores_type='wa',
n_permutations = 199, permute_by=[], seed=None):
# initialize the self object
if not isinstance(scale_Y, bool):
raise ValueError("scale_Y must be either True or False.")
if not (scaling == 1 or scaling == 2):
raise ValueError("scaling must be either 1 (distance analysis) or 2 (correlation analysis).")
if not (sample_scores_type == 'wa' or sample_scores_type == 'lc'):
raise ValueError("sample_scores_type must be either 'wa' or 'lc'.")
self.scale_Y = scale_Y
self.scaling = scaling
self.sample_scores_type = sample_scores_type
self.n_permutations = n_permutations
self.permute_by = permute_by
self.seed = seed
def fit(self, X, Y, W=None):
        # I use Y as the community matrix and X as the constraining matrix.
        # vegan uses the inverse, which is confusing since the response set
        # is usually Y and the explanatory set is usually X.
# These steps are numbered as in Legendre and Legendre, Numerical Ecology,
# 3rd edition, section 11.1.3
# 0) Preparation of data
feature_ids = X.columns
sample_ids = X.index # x index and y index should be the same
response_ids = Y.columns
        X = X.values  # Constraining matrix, typically of environmental variables
        Y = Y.values  # Community data matrix
if W is not None:
condition_ids = W.columns
            W = W.values
q = W.shape[1] # number of covariables (used in permutations)
else:
q=0
# dimensions
n_x, m = X.shape
n_y, p = Y.shape
if n_x == n_y:
n = n_x
else:
raise ValueError("Tables x and y must contain same number of rows.")
# scale
if self.scale_Y:
Y = (Y - Y.mean(axis=0)) / Y.std(axis=0, ddof=1)
X = X - X.mean(axis=0)# / X.std(axis=0, ddof=1)
# Note: Legendre 2011 does not scale X.
# If there is a covariable matrix W, the explanatory matrix X becomes the
# residuals of a regression between X as response and W as explanatory.
if W is not None:
W = (W - W.mean(axis=0))# / W.std(axis=0, ddof=1)
# Note: Legendre 2011 does not scale W.
B_XW = np.linalg.lstsq(W, X)[0]
X_hat = W.dot(B_XW)
X_ = X - X_hat # X is now the residual
else:
X_ = X
B = np.linalg.lstsq(X_, Y)[0]
Y_hat = X_.dot(B)
Y_res = Y - Y_hat # residuals
# 3) Perform a PCA on Y_hat
## perform singular value decomposition.
## eigenvalues can be extracted from u
## eigenvectors can be extracted from vt
u, s, vt = np.linalg.svd(Y_hat, full_matrices=False)
u_res, s_res, vt_res = np.linalg.svd(Y_res, full_matrices=False)
# compute eigenvalues from singular values
eigenvalues = s**2/(n-1)
eigenvalues_res = s_res**2/(n-1)
## determine rank kc
kc = np.linalg.matrix_rank(Y_hat)
kc_res = np.linalg.matrix_rank(Y_res)
## retain only eigenvs superior to tolerance
eigenvalues = eigenvalues[:kc]
eigenvalues_res = eigenvalues_res[:kc_res]
eigenvalues_values_all = np.r_[eigenvalues, eigenvalues_res]
trace = np.sum(np.diag(np.cov(Y.T)))
trace_res = np.sum(np.diag(np.cov(Y_res.T)))
eigenvectors = vt.T[:,:kc]
eigenvectors_res = vt_res.T[:,:kc_res]
## cannonical axes used to compute F_marginal
canonical_axes = u[:, :kc]
## axes names
ordi_column_names = ['RDA%d' % (i+1) for i in range(kc)]
ordi_column_names_res = ['RDA_res%d' % (i+1) for i in range(kc_res)]
# 4) Ordination of objects (site scores, or vegan's wa scores)
F = Y.dot(eigenvectors) # columns of F are the ordination vectors
F_res = Y_res.dot(eigenvectors_res) # columns of F are the ordination vectors
# 5) F in space X (site constraints, or vegan's lc scores)
Z = Y_hat.dot(eigenvectors)
Z_res = Y_res.dot(eigenvectors_res)
# 6) Correlation between the ordination vectors in spaces Y and X
rk = np.corrcoef(F, Z) # not used yet
rk_res = np.corrcoef(F_res, Z_res) # not used yet
# 7) Contribution of the explanatory variables X to the canonical ordination
# axes
# 7.1) C (canonical coefficient): the weights of the explanatory variables X in
# the formation of the matrix of fitted site scores
C = B.dot(eigenvectors) # not used yet
C_res = B.dot(eigenvectors_res) # not used yet
# 7.2) The correlations between X and the ordination vectors in space X are
# used to represent the explanatory variables in biplots.
corXZ = corr(X_, Z)
corXZ_res = corr(X_, Z_res)
# 8) Compute triplot objects
# I combine fitted and residuals scores into the DataFrames
singular_values_all = np.r_[s[:kc], s_res[:kc_res]]
ordi_column_names_all = ordi_column_names + ordi_column_names_res
const = np.sum(singular_values_all**2)**0.25
if self.scaling == 1:
scaling_factor = const
D = np.diag(np.sqrt(eigenvalues/trace)) # Diagonal matrix of weights (Numerical Ecology with R, p. 196)
D_res = np.diag(np.sqrt(eigenvalues_res/trace_res))
elif self.scaling == 2:
scaling_factor = singular_values_all / const
D = np.diag(np.ones(kc)) # Diagonal matrix of weights
D_res = np.diag(np.ones(kc_res))
response_scores = pd.DataFrame(np.hstack((eigenvectors, eigenvectors_res)) * scaling_factor,
index=response_ids,
columns=ordi_column_names_all)
response_scores.index.name = 'ID'
if self.sample_scores_type == 'wa':
sample_scores = pd.DataFrame(np.hstack((F, F_res)) / scaling_factor,
index=sample_ids,
columns=ordi_column_names_all)
elif self.sample_scores_type == 'lc':
sample_scores = pd.DataFrame(np.hstack((Z, Z_res)) / scaling_factor,
index=sample_ids,
columns=ordi_column_names_all)
sample_scores.index.name = 'ID'
biplot_scores = pd.DataFrame(np.hstack((corXZ.dot(D), corXZ_res.dot(D_res))) * scaling_factor,
index=feature_ids,
columns=ordi_column_names_all)
biplot_scores.index.name = 'ID'
sample_constraints = pd.DataFrame(np.hstack((Z, F_res)) / scaling_factor,
index=sample_ids,
columns=ordi_column_names_all)
sample_constraints.index.name = 'ID'
p_explained = pd.Series(singular_values_all / singular_values_all.sum(), index=ordi_column_names_all)
# Statistics
## Response statistics
### Unadjusted R2
SSY_i = np.sum(Y**2, axis=0)#np.array([np.sum((Y[:, i] - Y[:, i].mean())**2) for i in range(p)])
SSYhat_i = np.sum(Y_hat**2, axis=0)#np.array([np.sum((Y_hat[:, i] - Y_hat[:, i].mean())**2) for i in range(p)])
SSYres_i = np.sum(Y_res**2, axis=0)#np.array([np.sum((Y_res[:, i] - Y_res[:, i].mean())**2) for i in range(p)])
R2_i = SSYhat_i/SSY_i
R2 = np.mean(R2_i)
### Adjusted R2
R2a_i = 1-((n-1)/(n-m-1))*(1-R2_i)
R2a = np.mean(R2a_i)
### F-statistic
F_stat_i = (R2_i/m) / ((1-R2_i) / (n-m-1))
F_stat = (R2/m) / ((1-R2) / (n-m-1))
response_stats_each = pd.DataFrame({'R2': R2_i, 'Adjusted R2': R2a_i, 'F': F_stat_i},
index = response_ids)
response_stats_summary = pd.DataFrame({'R2': R2, 'Adjusted R2': R2a, 'F':F_stat},
index = ['Summary'])
response_stats = pd.DataFrame(pd.concat([response_stats_each, response_stats_summary], axis=0),
columns = ['F', 'R2', 'Adjusted R2'])
## Canonical axis statistics
"""
        the permutation algorithm is inspired by the supplementary material
        published in Legendre et al., 2011, doi 10.1111/j.2041-210X.2010.00078.x
"""
if 'axes' in self.permute_by:
if W is None:
F_m = s[0]**2 / (np.sum(Y**2) - np.sum(Y_hat**2))
F_m_perm = np.array([])
for j in range(self.n_permutations):
Y_perm = Y[np.random.permutation(n), :] # full permutation model
B_perm = np.linalg.lstsq(X_, Y_perm)[0]
Y_hat_perm = X_.dot(B_perm)
s_perm = np.linalg.svd(Y_hat_perm, full_matrices=False)[1]
F_m_perm = np.r_[F_m_perm, s_perm[0]**2 / (np.sum(Y_perm**2) - np.sum(Y_hat_perm**2))]
F_marginal = F_m
p_values = (1 + np.sum(F_m_perm >= F_m)) / (1 + self.n_permutations)
begin = 1
else:
F_marginal = np.array([])
p_values = np.array([])
begin = 0
if (W is not None) or (W is None and kc > 1):
if W is None:
XW = X_
else:
XW = np.c_[X_, W]
# Compute F_marginal
B_XW = np.linalg.lstsq(XW, Y)[0]
Y_hat_XW = XW.dot(B_XW)
kc_XW = np.linalg.matrix_rank(XW)
F_marginal_XW = s[begin:kc]**2 / (np.sum(Y**2) - np.sum(Y_hat_XW**2))
F_marginal = np.r_[F_marginal, F_marginal_XW]
for i in range(begin, kc):
# set features to compute the object to fit with Y_perm
# and to compute Y_perm from Y_res_i
if W is None:
features_i = np.c_[np.repeat(1, n), canonical_axes[:, :i]]
else:
features_i = np.c_[W, canonical_axes[:, :i]] # if i==0, then np.c_[W, X[:, :i]] == W
B_fX_ = np.linalg.lstsq(features_i, X_)[0]
X_hat_i = features_i.dot(B_fX_)
X_res_i = X_ - X_hat_i
# to avoid collinearity
X_res_i = X_res_i[:, :np.linalg.matrix_rank(X_res_i)]
# find Y residuals for permutations with residuals model (only model available)
B_i = np.linalg.lstsq(features_i, Y)[0] # coefficients for axis i
Y_hat_i = features_i.dot(B_i) # Y estimation for axis i
Y_res_i = Y - Y_hat_i # Y residuals for axis i
F_m_perm = np.array([])
for j in range(self.n_permutations):
Y_perm = Y_hat_i + Y_res_i[np.random.permutation(n), :] # reduced permutation model
B_perm = np.linalg.lstsq(X_res_i, Y_perm)[0]
Y_hat_perm = X_res_i.dot(B_perm)
u_perm, s_perm, vt_perm = np.linalg.svd(Y_hat_perm, full_matrices=False)
B_tot_perm = np.linalg.lstsq(XW, Y_perm)[0]
Y_hat_tot_perm = XW.dot(B_tot_perm)
F_m_perm = np.r_[F_m_perm, s_perm[0]**2 / (np.sum(Y_perm**2) - np.sum(Y_hat_tot_perm**2))]
p_values = np.r_[p_values, (1 + np.sum(F_m_perm >= F_marginal[i])) / (1 + self.n_permutations)]
axes_stats = pd.DataFrame({'F marginal': F_marginal * (n-1-kc_XW), 'P value (>F)': p_values},
index=['RDA%d' % (i+1) for i in range(kc)])
else:
axes_stats = None
if 'features' in self.permute_by:
p_values_coef = np.array([]) # initiate empty vector for p-values
F_coef = np.array([]) # initiate empty vector for F-scores
for i in range(X_.shape[1]):
feature_i = np.c_[X_[:, i]] # isolate the explanatory variable to test
B_i = np.linalg.lstsq(feature_i, Y)[0] # coefficients for variable i
Y_hat_i = feature_i.dot(B_i) # Y estimation for explanatory variable i
Y_res_i = Y - Y_hat_i # Y residuals for variable i
if W is None:
rsq_i = np.sum(Y_hat_i**2) / np.sum(Y**2) # r-square for variable i
F_coef = np.r_[F_coef, (rsq_i/m) / ((1-rsq_i) / (n-m-1))] # F-score for variable i, from eq. 7 in LOtB, 2011
else:
                F_coef = np.r_[F_coef, (np.sum(Y_hat_i**2)/m) / (np.sum(Y_res_i**2) / (n-m-q-1))] # partial F with covariables; the denominator df (n-m-q-1) is an assumption
#this code is the workbench for q-learning
#it consists on a lifting particle that must reach a certain height
#it is only subjected to gravity
#The applied force is fixed at either 9.9 N or 9.7 N
import numpy as np
import math
import random
import matplotlib.pyplot as plt
#INITIALIZE VARIABLES
######################
m=1 #1 kg mass
g=9.80 #gravity (m/s^2)
dt=0.05 #simulation time step (s)
Final_height=50 #target height (cm)
Final_vel=0
#STATES are discretized 0,1,2,...,60 cm
n_pos=61
STATES=np.linspace(0,Final_height+10,Final_height+10+1)
#SPEEDS are discretized -10,-9,-8...0,1,2,3...,50cm/s.
n_speeds=61
SPEEDS=np.linspace(-10,50,n_speeds)
#ROWS= States (61*61=3721 rows)
#COLUMNS= Actions (9.9 , 9.7) two actions
Rows=n_pos*n_speeds
Columns=2
Actions=([9.9, 9.7])
#time steps
n_items=302
x=np.linspace(0,301,n_items)
#Initialize Q matrix
Q=np.ones((Rows,Columns))
#Q-learning variables
alpha=0.5
gamma=0.5
epsilon=0.15
goalCounter=0
Contador=0
#function to choose the Action
def ChooseAction (Columns,Q,state):
    if np.random.uniform() < epsilon:
        rand_action=np.random.permutation(Columns)
        action=rand_action[1] #uniformly random action
        F=Actions[action]
        max_index=action #return the index actually taken so Q updates the right column
# if not select max action in Qtable (act greedy)
else:
QMax=max(Q[state])
max_indices=np.where(Q[state]==QMax)[0] # Identify all indexes where Q equals max
n_hits=len(max_indices) # Number of hits
max_index=int(max_indices[random.randint(0, n_hits-1)]) # If many hits, choose randomly
F=Actions[max_index]
return F, max_index
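#With epsilon=0.15 the agent explores on roughly 15% of steps; otherwise it
#acts greedily, breaking ties in Q uniformly at random among the maximizers.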
#function to apply the dynamic model
def ActionToState(F,g,m,dt,z_pos_old,z_vel_old,z_accel_old):
z_accel=(-g + F/m)*100
z_vel=z_vel_old + (z_accel+z_accel_old)/2*dt
z_pos=z_pos_old + (z_vel+z_vel_old)/2*dt
z_accel_old=z_accel
z_vel_old=z_vel
z_pos_old=z_pos
return z_accel,z_vel,z_pos,z_vel_old,z_pos_old
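#The update above is trapezoidal integration: averaging the old and new
#derivatives makes each step second-order accurate, versus first-order plain
#Euler. The *100 factor converts m/s^2 to cm/s^2 so states stay in cm.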
#BEGINNING of the algorithm
for episode in range(1,200000):
# initial state
z_pos=np.zeros(n_items)
z_vel=np.zeros(n_items)
z_accel=np.zeros(n_items)
    z_pos_goal=np.zeros((1000, n_items))
import numpy as np
import pytest
import rdsolver as rd
def test_grid_points_1d():
# Test standard
correct = np.array([1, 2, 3, 4, 5]).astype(float) / 5 * 2 * np.pi
assert np.isclose(rd.utils.grid_points_1d(5), correct).all()
# Test standard with specified length
    correct = np.array([1, 2, 3, 4, 5])
import numpy as np
import pandas as pd
import xarray as xr
import Grid
from timeit import default_timer as timer
err = 1e-5
limit = 1e5
alpha = 0.005
# ---- BASIC FUNCTIONS ----
def ur(mI, mB):
return (mB * mI) / (mB + mI)
def nu(gBB):
return np.sqrt(gBB)
def epsilon(kx, ky, kz, mB):
return (kx**2 + ky**2 + kz**2) / (2 * mB)
def omegak(kx, ky, kz, mB, n0, gBB):
ep = epsilon(kx, ky, kz, mB)
return np.sqrt(ep * (ep + 2 * gBB * n0))
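# Bogoliubov dispersion: omega(k) = sqrt(eps_k * (eps_k + 2*gBB*n0)), linear
# (phonon-like) at small k with sound speed sqrt(gBB * n0 / mB).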
def Omega(kx, ky, kz, DP, mI, mB, n0, gBB):
return omegak(kx, ky, kz, mB, n0, gBB) + (kx**2 + ky**2 + kz**2) / (2 * mI) - kz * DP / mI
def Wk(kx, ky, kz, mB, n0, gBB):
# old_settings = np.seterr(); np.seterr(all='ignore')
output = np.sqrt(epsilon(kx, ky, kz, mB) / omegak(kx, ky, kz, mB, n0, gBB))
# np.seterr(**old_settings)
return output
def g(kxg, kyg, kzg, dVk, aIBi, mI, mB, n0, gBB):
# gives bare interaction strength constant
old_settings = np.seterr(); np.seterr(all='ignore')
mR = ur(mI, mB)
integrand = 2 * mR / (kxg**2 + kyg**2 + kzg**2)
mask = np.isinf(integrand); integrand[mask] = 0
np.seterr(**old_settings)
return 1 / ((mR / (2 * np.pi)) * aIBi - np.sum(integrand) * dVk)
# ---- CALCULATION HELPER FUNCTIONS ----
def ImpMomGrid_from_PhononMomGrid(kgrid, P):
kx = kgrid.getArray('kx'); ky = kgrid.getArray('ky'); kz = kgrid.getArray('kz')
PI_x = -1 * kx; PI_y = -1 * ky; PI_z = P - kz
PI_x_ord = np.flip(PI_x, 0); PI_y_ord = np.flip(PI_y, 0); PI_z_ord = np.flip(PI_z, 0)
PIgrid = Grid.Grid('CARTESIAN_3D')
PIgrid.initArray_premade('kx', PI_x_ord); PIgrid.initArray_premade('ky', PI_y_ord); PIgrid.initArray_premade('kz', PI_z_ord)
return PIgrid
def FWHM(x, f):
# f is function of x -> f(x)
if np.abs(np.max(f) - np.min(f)) < 1e-2:
return 0
else:
D = f - np.max(f) / 2
indices = np.where(D > 0)[0]
return x[indices[-1]] - x[indices[0]]
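# Quick check: for a unit Gaussian the analytic FWHM is 2*sqrt(2*ln 2) ~ 2.3548,
# e.g. x = np.linspace(-5, 5, 1001); FWHM(x, np.exp(-x**2 / 2)) -> ~2.35 (grid-limited).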
def xyzDist_ProjSlices(phonon_pos_dist, phonon_mom_dist, grid_size_args, grid_diff_args):
nxyz = phonon_pos_dist
nPB = phonon_mom_dist
Nx, Ny, Nz = grid_size_args
dx, dy, dz, dkx, dky, dkz = grid_diff_args
# slice directions
nPB_x_slice = nPB[:, Ny // 2, Nz // 2]
nPB_y_slice = nPB[Nx // 2, :, Nz // 2]
nPB_z_slice = nPB[Nx // 2, Ny // 2, :]
nPB_xz_slice = nPB[:, Ny // 2, :]
nPB_xy_slice = nPB[:, :, Nz // 2]
nxyz_x_slice = nxyz[:, Ny // 2, Nz // 2]
nxyz_y_slice = nxyz[Nx // 2, :, Nz // 2]
nxyz_z_slice = nxyz[Nx // 2, Ny // 2, :]
nxyz_xz_slice = nxyz[:, Ny // 2, :]
nxyz_xy_slice = nxyz[:, :, Nz // 2]
nPI_x_slice = np.flip(nPB_x_slice, 0)
nPI_y_slice = np.flip(nPB_y_slice, 0)
nPI_z_slice = np.flip(nPB_z_slice, 0)
nPI_xz_slice = np.flip(np.flip(nPB_xz_slice, 0), 1)
nPI_xy_slice = np.flip(np.flip(nPB_xy_slice, 0), 1)
pos_slices = nxyz_x_slice, nxyz_y_slice, nxyz_z_slice
mom_slices = nPB_x_slice, nPB_y_slice, nPB_z_slice, nPI_x_slice, nPI_y_slice, nPI_z_slice
cont_slices = nxyz_xz_slice, nxyz_xy_slice, nPB_xz_slice, nPB_xy_slice, nPI_xz_slice, nPI_xy_slice
# integrate directions
nPB_x = np.sum(nPB, axis=(1, 2)) * dky * dkz
nPB_y = np.sum(nPB, axis=(0, 2)) * dkx * dkz
nPB_z = np.sum(nPB, axis=(0, 1)) * dkx * dky
nxyz_x = np.sum(nxyz, axis=(1, 2)) * dy * dz
nxyz_y = np.sum(nxyz, axis=(0, 2)) * dx * dz
nxyz_z = np.sum(nxyz, axis=(0, 1)) * dx * dy
nPI_x = np.flip(nPB_x, 0)
nPI_y = np.flip(nPB_y, 0)
nPI_z = np.flip(nPB_z, 0)
pos_integration = nxyz_x, nxyz_y, nxyz_z
mom_integration = nPB_x, nPB_y, nPB_z, nPI_x, nPI_y, nPI_z
return pos_slices, mom_slices, cont_slices, pos_integration, mom_integration
# @profile
def xyzDist_To_magDist(kgrid, phonon_mom_dist, P):
nPB = phonon_mom_dist
# kgrid is the Cartesian grid upon which the 3D matrix nPB is defined -> nPB is the phonon momentum distribution in kx,ky,kz
kxg, kyg, kzg = np.meshgrid(kgrid.getArray('kx'), kgrid.getArray('ky'), kgrid.getArray('kz'), indexing='ij', sparse=True) # can optimize speed by taking this from the coherent_state precalculation
dVk_const = kgrid.dV()[0] * (2 * np.pi)**(3)
PB = np.sqrt(kxg**2 + kyg**2 + kzg**2)
PI = np.sqrt((-kxg)**2 + (-kyg)**2 + (P - kzg)**2)
PB_flat = PB.reshape(PB.size)
PI_flat = PI.reshape(PI.size)
nPB_flat = nPB.reshape(nPB.size)
PB_series = pd.Series(nPB_flat, index=PB_flat)
PI_series = pd.Series(nPB_flat, index=PI_flat)
nPBm_unique = PB_series.groupby(PB_series.index).sum() * dVk_const
nPIm_unique = PI_series.groupby(PI_series.index).sum() * dVk_const
PB_unique = nPBm_unique.keys().values
PI_unique = nPIm_unique.keys().values
nPBm_cum = nPBm_unique.cumsum()
nPIm_cum = nPIm_unique.cumsum()
# CDF and PDF pre-processing
    PBm_Vec, dPBm = np.linspace(0, np.max(PB_unique), 200, retstep=True)  # retstep also returns the grid spacing; 200 bins is an assumed resolution
#Utility Functions
import copy
from collections import defaultdict
import glob
import os
import random
from stl import mesh
#Math Functions
import alphashape
from descartes import PolygonPatch
import math
import numpy as np
import scipy.linalg as ling
from scipy.spatial import Delaunay
from scipy.special import jn
#Drawing Functios
import matplotlib.pyplot
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
from matplotlib.colors import LightSource
from matplotlib import cm
#Other Modules
from faser_math import fsr
from faser_utils.disp.disp import disp, progressBar
# Create an instance of a LightSource and use it to illuminate the surface.
def alpha_shape_3D(pos, alpha):
"""
Compute the alpha shape (concave hull) of a set of 3D points.
Parameters:
pos - np.array of shape (n, 3) points.
alpha - alpha value.
return
outer surface vertex indices, edge indices, and triangle indices
"""
#Function found here https://stackoverflow.com/questions/26303878/alpha-shapes-in-3d
tetra = Delaunay(pos)
# Find radius of the circumsphere.
# By definition, radius of the sphere fitting inside the tetrahedral needs
# to be smaller than alpha value
# http://mathworld.wolfram.com/Circumsphere.html
    tetrapos = np.take(pos, tetra.simplices, axis=0)
normsq = np.sum(tetrapos**2, axis=2)[:,:,None]
ones = np.ones((tetrapos.shape[0], tetrapos.shape[1], 1))
a = np.linalg.det(np.concatenate((tetrapos, ones), axis=2))
Dx = np.linalg.det(np.concatenate((normsq, tetrapos[:,:,[1, 2]], ones), axis=2))
Dy = -np.linalg.det(np.concatenate((normsq, tetrapos[:,:,[0, 2]], ones), axis=2))
Dz = np.linalg.det(np.concatenate((normsq, tetrapos[:,:,[0, 1]], ones), axis=2))
c = np.linalg.det(np.concatenate((normsq, tetrapos), axis=2))
r = np.sqrt(Dx**2+Dy**2+Dz**2-4*a*c)/(2*np.abs(a))
# Find tetrahedrals
    tetras = tetra.simplices[r<alpha,:]
# triangles
TriComb = np.array([(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)])
Triangles = tetras[:,TriComb].reshape(-1, 3)
Triangles = np.sort(Triangles, axis=1)
# Remove triangles that occurs twice, because they are within shapes
TrianglesDict = defaultdict(int)
for tri in Triangles:TrianglesDict[tuple(tri)] += 1
Triangles=np.array([tri for tri in TrianglesDict if TrianglesDict[tri] ==1])
#edges
EdgeComb=np.array([(0, 1), (0, 2), (1, 2)])
Edges=Triangles[:,EdgeComb].reshape(-1, 2)
Edges=np.sort(Edges, axis=1)
Edges=np.unique(Edges, axis=0)
Vertices = np.unique(Edges)
return Vertices,Edges,Triangles
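# Example usage (hypothetical point cloud; alpha is the circumsphere radius cutoff):
#   pts = np.random.rand(500, 3)
#   verts, edges, tris = alpha_shape_3D(pts, alpha=0.25)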
def DrawManipulability(J, tm, lenfactor, ax):
"""Short summary.
Args:
J (type): Description of parameter `J`.
tm (type): Description of parameter `tm`.
lenfactor (type): Description of parameter `lenfactor`.
Returns:
type: Description of returned object.
"""
p = tm[0:3, 3]
R = tm[0:3, 2]
Aw = J[0:3,:] @ J[0:3,:].conj().transpose()
Av = J[3:6,:] @ J[3:6,:].conj().transpose()
    weigd, weigv = ling.eig(Aw) # eigenvalues, then eigenvectors
    weig = np.sqrt(np.real(weigd))
    veigd, veigv = ling.eig(Av)
    veig = np.sqrt(np.real(veigd))
weigvs = weigv.copy()
veigvs = veigv.copy()
for i in range(0, 3):
weigvs[0:3, i] = R @ weigv[0:3, i]
veigvs[0:3, i] = R @ veigv[0:3, i]
for i in range(0, 3):
pw = p + lenfactor * weigvs[0:3, i] * weig[i]
pv = p + lenfactor * veigvs[0:3, i] * veig[i]
ax.plot3D(p, pw)
ax.plot3D(p, pv)
def drawROM(arm, ares, ax):
"""Short summary.
Args:
arm (type): Description of parameter `arm`.
ares (type): Description of parameter `ares`.
ax (type): Description of parameter `ax`.
Returns:
type: Description of returned object.
"""
farthestx = []
farthesty = []
farthestz = []
lmin = -180
lmax = 180
for i in range(50000):
print(i/50000*100)
t = arm.FK(np.random.rand(7) * 2 * np.pi - np.pi)
ta = fsr.TMtoTAA(t)
farthestx.append(ta[0])
farthesty.append(ta[1])
farthestz.append(ta[2])
ax.scatter3D(farthestx, farthesty, farthestz, s = 2)
def DrawSTL(tm, fname, ax, scale = 1.0):
"""Short summary.
Args:
tm (type): Description of parameter `tm`.
fname (type): Description of parameter `fname`.
ax (type): Description of parameter `ax`.
scale (type): Description of parameter `scale`.
Returns:
type: Description of returned object.
"""
    #make sure to install numpy-stl and not stl
t_mesh = mesh.Mesh.from_file(fname)
for i in range(len(t_mesh.x)):
for j in range(3):
t_mesp = fsr.TAAtoTM(np.array([t_mesh.x[i, j], t_mesh.y[i, j], t_mesh.z[i, j], 0, 0, 0]))
#disp(t_mesh.x[i, j])
t_new = tm @ t_mesp
mesp_aa = fsr.TMtoTAA(t_new)
t_mesh.x[i, j] = mesp_aa[0] * scale
t_mesh.y[i, j] = mesp_aa[1] * scale
t_mesh.z[i, j] = mesp_aa[2] * scale
X = t_mesh.x
Y = t_mesh.y
Z = t_mesh.z
light = LightSource(90, 45)
illuminated_surface = light.shade(Z, cmap=cm.coolwarm)
ax.plot_surface(t_mesh.x, t_mesh.y, t_mesh.z, rstride=1, cstride=1, linewidth=0, antialiased=False,
facecolors=illuminated_surface)
#ax.add_collection3d(mplot3d.art3d.Poly3DCollection(t_mesh.vectors))
#ax.auto_scale_xyz(scale, scale, scale)
def getSTLProps(fname):
"""Short summary.
Args:
fname (type): Description of parameter `fname`.
Returns:
type: Description of returned object.
"""
#Return center of Mass, inertia, etc
new_mesh = mesh.Mesh.from_file(fname)
return new_mesh.get_mass_properties()
def QuadPlot(p1, p2, dim, ax, c = 'b'):
"""Short summary.
Args:
p1 (type): Description of parameter `p1`.
p2 (type): Description of parameter `p2`.
dim (type): Description of parameter `dim`.
ax (type): Description of parameter `ax`.
c (type): Description of parameter `c`.
Returns:
type: Description of returned object.
"""
bl = p1.spawnNew([0, -dim[0]/2, -dim[1]/2, 0, 0, 0])
br = p1.spawnNew([0, -dim[0]/2, dim[1]/2, 0, 0, 0])
tl = p1.spawnNew([0, dim[0]/2, -dim[1]/2, 0, 0, 0])
tr = p1.spawnNew([0, dim[0]/2, dim[1]/2, 0, 0, 0])
p1a = p1
p2a = p2
p1bl = p1 @ bl
p2bl = p2 @ bl
p1br = p1 @ br
p2br = p2 @ br
p1tl = p1 @ tl
p2tl = p2 @ tl
p1tr = p1 @ tr
p2tr = p2 @ tr
#Core
ax.plot3D((p1bl[0], p2bl[0]),(p1bl[1], p2bl[1]),(p1bl[2], p2bl[2]), c)
ax.plot3D((p1br[0], p2br[0]),(p1br[1], p2br[1]),(p1br[2], p2br[2]), c)
ax.plot3D((p1tl[0], p2tl[0]),(p1tl[1], p2tl[1]),(p1tl[2], p2tl[2]), c)
ax.plot3D((p1tr[0], p2tr[0]),(p1tr[1], p2tr[1]),(p1tr[2], p2tr[2]), c)
#End
ax.plot3D((p2tl[0], p2bl[0]),(p2tl[1], p2bl[1]),(p2tl[2], p2bl[2]), c)
ax.plot3D((p2tr[0], p2br[0]),(p2tr[1], p2br[1]),(p2tr[2], p2br[2]), c)
ax.plot3D((p2bl[0], p2br[0]),(p2bl[1], p2br[1]),(p2bl[2], p2br[2]), c)
ax.plot3D((p2tl[0], p2tr[0]),(p2tl[1], p2tr[1]),(p2tl[2], p2tr[2]), c)
#ax.plot3D((p1tl[0], p1bl[0]),(p1tl[1], p1bl[1]),(p1tl[2], p1bl[2]), c)
#ax.plot3D((p1tr[0], p1br[0]),(p1tr[1], p1br[1]),(p1tr[2], p1br[2]), c)
#ax.plot3D((p1bl[0], p1br[0]),(p1bl[1], p1br[1]),(p1bl[2], p1br[2]), c)
#ax.plot3D((p1tl[0], p1tr[0]),(p1tl[1], p1tr[1]),(p1tl[2], p1tr[2]), c)
def DrawArm(arm, ax, jrad = .1, jdia = .3, lens = 1, c = 'grey', forces = np.zeros((1))):
"""Short summary.
Args:
arm (type): Description of parameter `arm`.
ax (type): Description of parameter `ax`.
jrad (type): Description of parameter `jrad`.
jdia (type): Description of parameter `jdia`.
lens (type): Description of parameter `lens`.
c (type): Description of parameter `c`.
forces (type): Description of parameter `forces`.
Returns:
        type: Description of returned object.
"""
startind = 0
while (sum(arm.screw_list[3:6, startind]) == 1):
startind = startind + 1
poses = arm.getJointTransforms()
p = np.zeros((3, len(poses[startind:])))
for i in range(startind, len(poses[startind:])):
        if poses[i] is None:
continue
p[0, i] = (poses[i].TAA[0])
p[1, i] = (poses[i].TAA[1])
p[2, i] = (poses[i].TAA[2])
ax.scatter3D(p[0,:], p[1,:], p[2,:])
ax.plot3D(p[0,:], p[1,:], p[2,:])
Dims = np.copy(arm.link_dimensions).T
dofs = arm.screw_list.shape[1]
yrot = poses[0].spawnNew([0, 0, 0, 0, np.pi/2, 0])
xrot = poses[0].spawnNew([0, 0, 0, np.pi/2, 0, 0])
zrot = poses[0].spawnNew([0, 0, 0, 0, 0, np.pi])
for i in range(startind, dofs):
zed = poses[i]
DrawAxes(zed, lens, ax)
try:
#Tp = fsr.tmInterpMidpoint(poses[i], poses[i+1])
#T = fsr.adjustRotationToMidpoint(Tp ,poses[i], poses[i+1], mode = 1)
#disp(T)
#DrawRectangle(T, Dims[i+1, 0:3], ax, c = c)
QuadPlot(poses[i], poses[i+1], Dims[i+1, 0:3], ax, c = c)
if len(forces) != 1:
label = '%.1fNm' % (forces[i])
ax.text(poses[i][0], poses[i][1], poses[i][2], label)
if (arm.joint_axes[0, i] == 1):
if len(forces) != 1:
DrawTube(zed @ yrot, jrad, forces[i]/300, ax)
else:
DrawTube(zed @ yrot, jrad, jdia, ax)
elif (arm.joint_axes[1, i] == 1):
if len(forces) != 1:
DrawTube(zed @ xrot, jrad, forces[i]/300, ax)
else:
DrawTube(zed @ xrot, jrad, jdia, ax)
else:
if len(forces) != 1:
DrawTube(zed @ zrot, jrad, forces[i]/300, ax)
else:
DrawTube(zed @ zrot, jrad, jdia, ax)
except:
pass
zed = poses[0].gTAA()
if startind ==0:
DrawRectangle(arm.base_pos_global @
fsr.TAAtoTM(np.array([0, 0, Dims[len(Dims)-1, 2]/2, 0, 0, 0])),
Dims[len(Dims)-1, 0:3], ax, c = c)
for i in range(len(arm.cameras)):
DrawCamera(arm.cameras[i][0], 1, ax)
def DrawLine(tf1, tf2, ax, col = 'blue'):
"""Short summary.
Args:
tf1 (type): Description of parameter `tf1`.
tf2 (type): Description of parameter `tf2`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
Returns:
type: Description of returned object.
"""
ax.plot3D([tf1[0], tf2[0]], [tf1[1], tf2[1]], [tf1[2], tf2[2]], col)
def DrawMobilePlatform(pl, ax, col = 'blue'):
"""Short summary.
Args:
pl (type): Description of parameter `pl`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
Returns:
type: Description of returned object.
"""
DrawTube(pl.loc @ pl.fl, pl.wrad, .3, ax)
DrawTube(pl.loc @ pl.fr, pl.wrad, .3, ax)
DrawTube(pl.loc @ pl.bl, pl.wrad, .3, ax)
DrawTube(pl.loc @ pl.br, pl.wrad, .3, ax)
DrawRectangle(pl.loc, pl.dims, ax, col)
def DrawSP(sp, ax, col = 'green', forces = 1):
"""Short summary.
Args:
sp (type): Description of parameter `sp`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
forces (type): Description of parameter `forces`.
Returns:
type: Description of returned object.
"""
for i in range(6):
ax.plot3D([sp.getBottomJoints()[0, i], sp.getBottomJoints()[0,(i+1)%6]],
[sp.getBottomJoints()[1, i], sp.getBottomJoints()[1,(i+1)%6]],
[sp.getBottomJoints()[2, i], sp.getBottomJoints()[2,(i+1)%6]], 'blue')
ax.plot3D([sp.getTopJoints()[0, i], sp.getTopJoints()[0,(i+1)%6]],
[sp.getTopJoints()[1, i], sp.getTopJoints()[1,(i+1)%6]],
[sp.getTopJoints()[2, i], sp.getTopJoints()[2,(i+1)%6]], 'blue')
if i == 0:
ax.plot3D([sp.getBottomJoints()[0, i], sp.getTopJoints()[0, i]],
[sp.getBottomJoints()[1, i], sp.getTopJoints()[1, i]],
[sp.getBottomJoints()[2, i], sp.getTopJoints()[2, i]], 'darkred')
elif i == 1:
ax.plot3D([sp.getBottomJoints()[0, i], sp.getTopJoints()[0, i]],
[sp.getBottomJoints()[1, i], sp.getTopJoints()[1, i]],
[sp.getBottomJoints()[2, i], sp.getTopJoints()[2, i]], 'salmon')
else:
ax.plot3D([sp.getBottomJoints()[0, i], sp.getTopJoints()[0, i]],
[sp.getBottomJoints()[1, i], sp.getTopJoints()[1, i]],
[sp.getBottomJoints()[2, i], sp.getTopJoints()[2, i]], col)
if(sp.bottom_plate_thickness != 0):
aa = sp.nominal_plate_transform.spawnNew([
sp.getBottomJoints()[0, i],
sp.getBottomJoints()[1, i],
sp.getBottomJoints()[2, i],
sp.getBottomT()[3],
sp.getBottomT()[4],
sp.getBottomT()[5]]) @ (-1 * sp.nominal_plate_transform)
ab = sp.nominal_plate_transform.spawnNew([
sp.getBottomJoints()[0,(i+1)%6],
sp.getBottomJoints()[1,(i+1)%6],
sp.getBottomJoints()[2,(i+1)%6],
sp.getBottomT()[3],
sp.getBottomT()[4],
sp.getBottomT()[5]]) @ (-1 * sp.nominal_plate_transform)
ba = sp.nominal_plate_transform.spawnNew([
sp.getTopJoints()[0, i],
sp.getTopJoints()[1, i],
sp.getTopJoints()[2, i],
sp.getTopT()[3],
sp.getTopT()[4],
sp.getTopT()[5]]) @ (sp.nominal_plate_transform)
bb = sp.nominal_plate_transform.spawnNew([
sp.getTopJoints()[0,(i+1)%6],
sp.getTopJoints()[1,(i+1)%6],
sp.getTopJoints()[2,(i+1)%6],
sp.getTopT()[3],
sp.getTopT()[4],
sp.getTopT()[5]]) @ (sp.nominal_plate_transform)
ax.plot3D([aa[0], ab[0]],[aa[1], ab[1]],[aa[2], ab[2]], 'blue')
ax.plot3D([ba[0], bb[0]],[ba[1], bb[1]],[ba[2], bb[2]], 'blue')
ax.plot3D([sp.getBottomJoints()[0, i], aa[0]],
[sp.getBottomJoints()[1, i], aa[1]],
[sp.getBottomJoints()[2, i], aa[2]], 'blue')
ax.plot3D([sp.getTopJoints()[0, i], ba[0]],
[sp.getTopJoints()[1, i], ba[1]],
[sp.getTopJoints()[2, i], ba[2]], 'blue')
if forces == 1 and sp.getLegForces().size > 1:
for i in range(6):
label = '%.1fN' % (sp.getLegForces()[i])
if i % 2 == 0:
pos = sp.getActuatorLoc(i, 'b')
else:
pos = sp.getActuatorLoc(i, 't')
ax.text(pos[0], pos[1], pos[2], label)
def DrawInterPlate(sp1, sp2, ax, col):
"""Short summary.
Args:
sp1 (type): Description of parameter `sp1`.
sp2 (type): Description of parameter `sp2`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
Returns:
type: Description of returned object.
"""
for i in range(6):
aa = sp1.nominal_plate_transform.spawnNew([
sp1.getTopJoints()[0, i],
sp1.getTopJoints()[1, i],
sp1.getTopJoints()[2, i],
sp1.getTopT()[3],
sp1.getTopT()[4],
sp1.getTopT()[5]]) @ (sp1.nominal_plate_transform)
ab = sp1.nominal_plate_transform.spawnNew([
sp1.getTopJoints()[0,(i+1)%6],
sp1.getTopJoints()[1,(i+1)%6],
sp1.getTopJoints()[2,(i+1)%6],
sp1.getTopT()[3],
sp1.getTopT()[4],
sp1.getTopT()[5]]) @ (sp1.nominal_plate_transform)
ba = sp2.nominal_plate_transform.spawnNew([
sp2.getBottomJoints()[0, i],
sp2.getBottomJoints()[1, i],
sp2.getBottomJoints()[2, i],
sp2.getBottomT()[3],
sp2.getBottomT()[4],
sp2.getBottomT()[5]]) @ (-1 * sp2.nominal_plate_transform)
bb = sp2.nominal_plate_transform.spawnNew([
sp2.getBottomJoints()[0,(i+1)%6],
sp2.getBottomJoints()[1,(i+1)%6],
sp2.getBottomJoints()[2,(i+1)%6],
sp2.getBottomT()[3],
sp2.getBottomT()[4],
sp2.getBottomT()[5]]) @ (-1 * sp2.nominal_plate_transform)
#ax.plot3D([aa[0], ab[0]],[aa[1], ab[1]],[aa[2], ab[2]], 'g')
#ax.plot3D([ba[0], bb[0]],[ba[1], bb[1]],[ba[2], bb[2]], 'g')
ax.plot3D(
[sp2.getBottomJoints()[0, i], aa[0]],
[sp2.getBottomJoints()[1, i], aa[1]],
[sp2.getBottomJoints()[2, i], aa[2]], 'g')
ax.plot3D(
[sp1.getTopJoints()[0, i], ba[0]],
[sp1.getTopJoints()[1, i], ba[1]],
[sp1.getTopJoints()[2, i], ba[2]], 'g')
def DrawAssembler(spl, ax, col = 'green', forces = 1):
"""Short summary.
Args:
spl (type): Description of parameter `spl`.
ax (type): Description of parameter `ax`.
col (type): Description of parameter `col`.
forces (type): Description of parameter `forces`.
Returns:
type: Description of returned object.
"""
for i in range(spl.numsp):
DrawSP(spl.splist[i], ax , col, forces)
if i + 1 < spl.numsp:
DrawInterPlate(spl.splist[i], spl.splist[i+1], ax, col)
def DrawCamera(cam, size, ax):
"""Short summary.
Args:
cam (type): Description of parameter `cam`.
size (type): Description of parameter `size`.
ax (type): Description of parameter `ax`.
Returns:
type: Description of returned object.
"""
DrawAxes(cam.CamT, size/2, ax)
ScreenLoc = cam.CamT @ fsr.TAAtoTM(np.array([0, 0, size, 0, 0, 0]))
imgT = cam.getFrameSize(size)
print(imgT)
Scr = np.zeros((4, 3))
t = ScreenLoc @ fsr.TAAtoTM(np.array([-imgT[0], imgT[1], 0, 0, 0, 0]))
Scr[0, 0:3] = t[0:3].flatten()
t = ScreenLoc @ fsr.TAAtoTM(np.array([imgT[0], imgT[1], 0, 0, 0, 0]))
Scr[1, 0:3] = t[0:3].flatten()
t = ScreenLoc @ fsr.TAAtoTM(np.array([-imgT[0], -imgT[1], 0, 0, 0, 0]))
Scr[3, 0:3] = t[0:3].flatten()
t = ScreenLoc @ fsr.TAAtoTM(np.array([imgT[0], -imgT[1], 0, 0, 0, 0]))
Scr[2, 0:3] = t[0:3].flatten()
for i in range(4):
ax.plot3D((cam.CamT[0],Scr[i, 0]),
(cam.CamT[1],Scr[i, 1]),
(cam.CamT[2],Scr[i, 2]), 'green')
ax.plot3D(np.hstack((Scr[0:4, 0], Scr[0, 0])),
              np.hstack((Scr[0:4, 1], Scr[0, 1])),
              np.hstack((Scr[0:4, 2], Scr[0, 2])), 'green') # z-row reconstructed to close the screen outline
from scipy.io import loadmat
import matplotlib.pyplot as plt
from cost_function import compute_cost
import numpy as np
from gradient import gradient
from scipy.optimize import minimize
filename = 'ex5data1.mat'
data = loadmat(filename)
# Training set
x, y = data['X'], data['y'].flatten()
# Validation set
xval, yval = data['Xval'], data['yval'].flatten()
# Test set
xtest, ytest = data['Xtest'], data['ytest'].flatten()
# Plot all of the data
plt.scatter(x, y, c='r', s=25, marker='x', label="Training Data")
plt.scatter(xval, yval, c='g', s=25, label="Validation Data")
plt.scatter(xtest, ytest, c='b', s=25, label="Test Data")
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.legend()
plt.show()
theta = np.zeros(x.shape[1] + 1)
#!/usr/bin/python3
import sys
import string
import time
import numpy as np
import datetime
from . import ivi
from . import usbtmc
from multiprocessing import Process, Queue, cpu_count
import multiprocessing
from scipy.optimize import leastsq,broyden1
from scipy import stats
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
import pyqtgraph
# importing this after pyqt5 tells pyqtgraph to use qt5 instead of 4
channel_assignment = {1: "nothing", 2: "internal voltage", 3: "current", 4: "nothing"}
sim = False
volcal = 2250
volcal_std = 50
resistance = 4.2961608775
frequency = 13560000
result_queue = Queue(100)
voltage_ref_phase = 0
voltage_ref_phase_std = 0
current_ref_phase = 0
current_ref_phase_std = 0
ref_size = 10 # Number of phase reference points to average over
scope_id = None
def get_scope(scope_id):
"Scope database. Add yours here!"
device = usbtmc.Instrument(scope_id)
idV = device.idVendor
idP = device.idProduct
device.close()
if idV == 0x0957 and idP == 0x175D:
scope = ivi.agilent.agilentMSO7104B(scope_id)
# Lecroy scopes, seems to work for multiple models which send the same idP
# tested for WR8404M, HDO6104A
elif idV == 0x05ff and idP == 0x1023:
scope = ivi.lecroy.lecroyWR8404M(scope_id)
elif idV == 0x0957 and idP == 6042: # York, untested
scope = ivi.agilent.agilentDSOX2004A(scope_id)
else:
scope = ivi.lecroy.lecroyWR8404M(scope_id) # your IVI scope here!
return scope
class QHLine(QFrame):
def __init__(self):
super(QHLine, self).__init__()
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
class main_window(QWidget):
def __init__(self):
super().__init__()
l_main_Layout = QHBoxLayout()
this_data_monitor = data_monitor()
this_ctrl_panel = ctrl_panel()
l_main_Layout.addLayout(this_data_monitor)
l_main_Layout.addLayout(this_ctrl_panel)
self.rand_data = np.random.normal(size=100)
self.setLayout(l_main_Layout)
self.setGeometry(300, 300, 1000, 450)
self.setWindowTitle("COST Power Monitor")
self.show()
class data_monitor(QVBoxLayout):
def __init__(self):
super().__init__()
self.results = []
self.tab_bar = QTabWidget()
pyqtgraph.setConfigOption('background', 'w')
pyqtgraph.setConfigOption('foreground', 'k')
self.graph = pyqtgraph.PlotWidget(name='Plot1')
self.graph.setLabel("left","power / W")
self.graph.setLabel("bottom","voltage / V")
self.table = QTableWidget()
self.table.setColumnCount(5)
self.table.setHorizontalHeaderLabels(["Voltage / V", "Current / A",
"Phaseshift / rad", "Power / W", "Time"])
self.tab_bar.addTab(self.table, "Table")
self.tab_bar.addTab(self.graph, "Graph")
self.update_timer = QtCore.QTimer(self)
self.update_timer.setInterval(100)
self.update_timer.timeout.connect(self.update)
self.update_timer.start()
btn_layout = QHBoxLayout()
clear_btn = QPushButton("Clear")
clear_btn.clicked.connect(self.clear_data)
save_btn = QPushButton("Save to Disk")
save_btn.clicked.connect(self.save_data)
copy_btn = QPushButton("Copy to Clipboard")
copy_btn.clicked.connect(self.copy_data)
plot_btn = QPushButton("Plot Data")
plot_btn.clicked.connect(self.update_graph)
btn_layout.addWidget(clear_btn)
btn_layout.addWidget(plot_btn)
btn_layout.addWidget(copy_btn)
btn_layout.addWidget(save_btn)
self.power_dspl = QLabel("0 W")
self.addWidget(self.power_dspl)
self.addWidget(self.tab_bar)
self.addLayout(btn_layout)
def clear_data(self):
global result_queue
result_queue.close()
result_queue = Queue(100)
self.table.setRowCount(0)
self.results = []
def save_data(self):
seperator = "\t "
next_line = " \n"
filename = QFileDialog.getSaveFileName(caption='Save File',
filter='*.txt')
if filename[0]:
phaseshift = (str(voltage_ref_phase - current_ref_phase) + " +- " +
str(voltage_ref_phase_std + current_ref_phase_std))
header = ("## cost-power-monitor file ## \n"+
"# " + str(datetime.datetime.now()) + "\n" +
"# Reference phaseshift: " + phaseshift + "\n" +
"# Calibration factor: " + str(volcal) + "\n" +
"# Channel Settings: " + str(channel_assignment) + "\n\n")
table_header = ("Voltage" + seperator + "Current" + seperator +
"Phaseshift" + seperator + "Power" + seperator + "Time" + next_line)
lines = [header, table_header]
for x in range(self.table.rowCount()):
this_line = ""
for y in range(self.table.columnCount()):
this_line = this_line + str(self.table.item(x,y).text()) + seperator
lines.append(this_line + next_line)
try:
f = open(filename[0], 'w')
f.writelines(lines)
except:
mb = QMessageBox()
mb.setIcon(QMessageBox.Information)
mb.setWindowTitle('Error')
mb.setText('Could not save file.')
mb.setStandardButtons(QMessageBox.Ok)
mb.exec_()
def copy_data(self):
QApplication.clipboard().setText(np.array2string(np.array(self.results)))
def update(self):
while not result_queue.empty():
new_data = result_queue.get()
if new_data:
self.results.append(new_data)
self.update_table(new_data)
self.update_power_dspl(new_data[-1])
def update_power_dspl(self, power):
self.power_dspl.setText("Power: " + str(round(power,3)) + " W")
def update_graph(self):
"""Updates the Graph with new data,
this data beeing an 2 dim array of voltage and power"""
self.graph.clear()
if self.results:
voltage = np.array(self.results)[:,0]
power = np.array(self.results)[:,3]
self.graph.plot(title="power", x=voltage, y=power, symbol='o')
def update_table(self,data):
"""Updates the table with new data.
Data is array with voltage, current, phaseshift and power"""
#print(data)
self.table.insertRow(self.table.rowCount())
for i,d in enumerate(data):
if i == 2:
r = 10 # round phaseshift very precise
else:
r = 3 # rest to third position after comma
self.table.setItem(self.table.rowCount()-1,i,QTableWidgetItem(str(round(d,r))))
time = datetime.datetime.now().time().strftime("%H:%M:%S")
self.table.setItem(self.table.rowCount()-1,self.table.columnCount()-1,QTableWidgetItem(str(time)))
self.table.scrollToBottom()
class ctrl_panel(QVBoxLayout):
def __init__(self):
super().__init__()
self.tab_bar = QTabWidget()
this_sweep_tab = sweep_tab()
this_settings_tab = settings_tab()
self.tab_bar.addTab(this_sweep_tab, "Sweep")
self.tab_bar.addTab(this_settings_tab, "Settings")
self.addWidget(self.tab_bar)
class sweep_tab(QWidget):
def __init__(self):
""" Don't look at it!"""
super().__init__()
l_main_Layout = QVBoxLayout()
self.sweeping = False
# Power stuff
power_group = QGroupBox()
power_layout = QVBoxLayout()
power_group.setLayout(power_layout)
show_power_row = QHBoxLayout()
show_power_row.addWidget(QLabel("Start/Pause Measurement"))
power_layout.addLayout(show_power_row)
power_btn_row = QHBoxLayout()
power_start_btn = QPushButton("Start")
power_start_btn.clicked.connect(self.start_sweep)
power_stop_btn = QPushButton("Pause")
power_stop_btn.clicked.connect(self.stop_sweep)
power_btn_row.addWidget(power_start_btn)
power_btn_row.addWidget(power_stop_btn)
power_layout.addLayout(power_btn_row)
l_main_Layout.addWidget(power_group)
# Reference stuff
ref_group = QGroupBox()
ref_layout = QVBoxLayout()
ref_group.setLayout(ref_layout)
show_ref_row = QHBoxLayout()
self.ref_label = QLabel("Undef")
show_ref_row.addWidget(QLabel("Reference Phaseshift:"))
show_ref_row.addWidget(self.ref_label)
ref_layout.addLayout(show_ref_row)
ref_btn_row = QHBoxLayout()
ref_start_btn = QPushButton("Find")
ref_start_btn.clicked.connect(self.find_ref)
ref_btn_row.addWidget(ref_start_btn)
ref_layout.addLayout(ref_btn_row)
l_main_Layout.addWidget(ref_group)
self.setLayout(l_main_Layout)
def start_sweep(self):
if not self.sweeping:
self.this_sweep = sweeper(channel_assignment, volcal, voltage_ref_phase, current_ref_phase)
self.this_sweep.start()
self.sweeping = True
def stop_sweep(self):
self.sweeping = False
self.this_sweep.stop()
def find_ref(self):
if not self.sweeping:
global voltage_ref_phase, current_ref_phase, voltage_ref_phase_std, current_ref_phase_std
self.this_sweep = sweeper(channel_assignment, volcal, voltage_ref_phase, current_ref_phase)
voltage_ref_phase, current_ref_phase, voltage_ref_phase_std, current_ref_phase_std = self.this_sweep.find_ref()
self.ref_label.setText(
str(round(voltage_ref_phase - current_ref_phase,10))
+ " ± "
+ str(round(voltage_ref_phase_std + current_ref_phase_std, 10)))
class settings_tab(QWidget):
def __init__(self):
super().__init__()
l_main_Layout = QVBoxLayout()
# list of connected scopes
self.scope_cbox = QComboBox()
self.scope_list()
# UI to select the scope
scope_group = QGroupBox()
scope_layout = QVBoxLayout()
scope_group.setLayout(scope_layout)
scope_sel_row = QHBoxLayout()
scope_info_row = QHBoxLayout()
scope_sel_row.addWidget(QLabel("Oscilloscope"))
scope_sel_row.addWidget(self.scope_cbox)
self.scope_cbox.setCurrentIndex(0)
self.scope_cbox.currentIndexChanged.connect(self.change_scope)
update_btn = QPushButton("Scan")
scope_sel_row.addWidget(update_btn)
self.scope_name = QLabel(" ")
scope_info_row.addWidget(self.scope_name)
self.change_scope()
scope_layout.addLayout(scope_sel_row)
scope_layout.addLayout(scope_info_row)
l_main_Layout.addWidget(scope_group)
l_main_Layout.addWidget(QHLine())
# UI to assign scope channels
chan_group = QGroupBox()
chan_layout = QVBoxLayout()
chan_group.setLayout(chan_layout)
chan_rows = []
for channel_num in range(1,5):
this_channel = channel_settings(channel_num)
chan_rows.append(this_channel)
chan_layout.addLayout(this_channel)
l_main_Layout.addWidget(chan_group)
l_main_Layout.addWidget(QHLine())
# UI to set or find voltage Calibration factor
volcal_group = QGroupBox()
volcal_layout = QVBoxLayout()
volcal_group.setLayout(volcal_layout)
volcal_row = QHBoxLayout()
self.volcal_box = QLineEdit(str(volcal))
self.volcal_box.setMaximumWidth(100)
self.volcal_box.textChanged.connect(self.change_volcal)
self.volcal_std_label = QLabel()
volcal_get = QPushButton("Find")
volcal_get.clicked.connect(self.get_volcal)
volcal_row.addWidget(QLabel("Calibration Factor: "))
volcal_row.addWidget(self.volcal_box)
volcal_row.addWidget(self.volcal_std_label)
volcal_row.addWidget(volcal_get)
volcal_layout.addLayout(volcal_row)
l_main_Layout.addWidget(volcal_group)
self.setLayout(l_main_Layout)
# monitor changes in scopelist
update_btn.clicked.connect(self.scope_list)
def change_scope(self):
global scope_id
idx = self.scope_cbox.currentIndex()
try:
device = self.devices[idx]
scope_id = "USB::%d::%d::INSTR" % (device.idVendor, device.idProduct)
manufacturer = device.manufacturer
product = device.product
except Exception as e:
print(e)
device = None
scope_id = None
manufacturer = ""
product = ""
try:
scope = get_scope(scope_id)
scope.close()
scope_known = True
mark = "✓"
except Exception as e:
print(e)
scope_known = False
mark = "✗"
self.scope_name.setText(mark + " " + manufacturer + " " + product)
def scope_list(self):
# list of connected USB devices
sel_entry = self.scope_cbox.currentText()
devices = usbtmc.list_devices()
dlist = []
for device in devices:
scope_idVendor = device.idVendor
scope_idProduct = device.idProduct
scope_label = (hex(scope_idVendor) + ":" + hex(scope_idProduct))
dlist.append(scope_label)
self.dlist, self.devices = dlist, devices
self.scope_cbox.clear()
self.scope_cbox.addItems(dlist)
idx = self.scope_cbox.findText(sel_entry)
if idx == -1:
try:
self.scope_cbox.setCurrentIndex(0)
except:
pass
else:
self.scope_cbox.setCurrentIndex(idx)
def change_volcal(self):
global volcal
volcal = float(self.volcal_box.text())
def get_volcal(self):
self.this_sweep = sweeper(channel_assignment, volcal, voltage_ref_phase, current_ref_phase)
try:
self.volcal_box.setText(str(round(self.this_sweep.calibrate(),1)))
except Exception as e:
print(e)
if type(volcal_std) == int:
self.volcal_std_label.setText("±" + str(round(volcal_std,1)))
else:
self.volcal_std_label.setText(str(volcal_std))
class channel_settings(QHBoxLayout):
def __init__(self, number):
"""Beware, Channels are numbered 1 to 4"""
super().__init__()
self.number = number
self.addWidget(QLabel("Channel " + str(self.number)))
self.chan_cbox = QComboBox()
chan_options = ["nothing", "internal voltage", "current", "external voltage"]
self.chan_cbox.addItems(chan_options)
self.addWidget(self.chan_cbox)
self.chan_cbox.setCurrentIndex(chan_options.index(channel_assignment[self.number]))
self.chan_cbox.currentIndexChanged.connect(self.change_channel)
def change_channel(self):
global channel_assignment
this_chan_ass = channel_assignment
this_chan_ass[self.number] = self.chan_cbox.currentText()
channel_assignment = this_chan_ass
class sweeper():
def __init__(self, channels, volcal, v_ref, c_ref):
global result_queue
mgr = multiprocessing.Manager()
self.channels = channels
self.volcal = volcal
self.v_ref = v_ref
self.c_ref = c_ref
self.data_queue = mgr.Queue(ref_size)
self.io_process = Process(target=self.io_worker, args=(self.data_queue, scope_id))
self.fit_process_list = []
for i in range(cpu_count()-1):
this_fit_proccess = Process(target=fit_worker,
args=(self.data_queue, result_queue, volcal, v_ref, c_ref))
self.fit_process_list.append(this_fit_proccess)
def start(self):
if not self.io_process.is_alive():
self.io_process.start()
for fit_process in self.fit_process_list:
if not fit_process.is_alive():
fit_process.start()
def stop(self):
if self.io_process.is_alive():
self.io_process.terminate()
for fit_process in self.fit_process_list:
while not self.data_queue.empty() and fit_process.is_alive():
time.sleep(1)
if fit_process.is_alive():
fit_process.terminate()
while not self.data_queue.empty():
self.data_queue.get()
def calibrate(self):
global volcal, volcal_std
ref_queue = Queue(ref_size*2) # Don't ask
self.io_process.start()
volcal_list = []
for i in range(ref_size):
data_dict = self.data_queue.get()
try:
external_voltage_data = data_dict["external voltage"]
except KeyError:
print("Channel 'External Voltage' not set.")
volcal_std = "Error, 'External Voltage' not set."
self.io_process.terminate()
return 0
voltage_data = data_dict["internal voltage"]
v_amp, v_freq, v_phase = fit_func(voltage_data)
ext_v_amp, ext_v_freq, ext_v_phase = fit_func(external_voltage_data)
volcal_list.append(ext_v_amp/v_amp)
self.io_process.terminate()
while not self.data_queue.empty():
self.data_queue.get()
volcal = np.average(volcal_list)
volcal_std = np.std(volcal_list)
return volcal
def find_ref(self):
ref_queue = Queue(ref_size*2) # Don't ask
self.io_process.start()
v_phases = []
c_phases = []
for i in range(ref_size):
data_dict = self.data_queue.get()
voltage_data = data_dict["internal voltage"]
v_amp, v_freq, v_phase = fit_func(voltage_data)
current_data = data_dict["current"]
c_amp, c_freq, c_phase = fit_func(current_data)
v_phases.append(v_phase)
c_phases.append(c_phase)
self.io_process.terminate()
while not self.data_queue.empty():
self.data_queue.get()
# Getting the average of an angle is hard:
# https://en.wikipedia.org/wiki/Mean_of_circular_quantities
mean_v_phase = np.arctan2(
np.sum(np.sin(np.array(v_phases)))/len(v_phases),
np.sum(np.cos(np.array(v_phases)))/len(v_phases)
) % (2*np.pi)
mean_c_phase = np.arctan2(
np.sum(np.sin(np.array(c_phases)))/len(c_phases),
np.sum(np.cos(np.array(c_phases)))/len(c_phases)
) % (2*np.pi)
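        # Worked example: phases 0.1 and 2*pi - 0.1 average to 0 here, where a
        # naive np.mean would give pi; summing sines and cosines before arctan2
        # avoids that wrap-around bias.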
v_phase_diff_sum = 0
c_phase_diff_sum = 0
for angle in v_phases:
# Next line seems to work. It's all very complicated.
v_phase_diff_sum = (v_phase_diff_sum
+ np.square(np.diff(np.unwrap([angle, mean_v_phase])))[0])
v_phase_std = np.sqrt(v_phase_diff_sum/len(v_phases))
for angle in c_phases:
# Next line seems to work. It's all very complicated.
c_phase_diff_sum = (c_phase_diff_sum
+ np.square(np.diff(np.unwrap([angle, mean_c_phase])))[0])
c_phase_std = np.sqrt(c_phase_diff_sum/len(c_phases))
global voltage_ref_phase, voltage_ref_phase_std
voltage_ref_phase = mean_v_phase
voltage_ref_phase_std = v_phase_std
global current_ref_phase, current_ref_phase_std
current_ref_phase = mean_c_phase
current_ref_phase_std = c_phase_std
self.v_ref = voltage_ref_phase
self.c_ref = current_ref_phase
return (voltage_ref_phase, current_ref_phase, voltage_ref_phase_std, current_ref_phase_std)
def io_worker(self, data_queue, scope_id):
""" Gets waveforms from the scope and puts them into the data_queue."""
device = usbtmc.Instrument(scope_id)
idV = device.idVendor
device.close()
scope = get_scope(scope_id)
while True and not sim:
data_dict = {}
if idV == 0x0957: # Agilent scopes want to be initialized (tested for DSO7104B)
scope.measurement.initiate()
for chan_num in self.channels:
chan_name = self.channels[chan_num]
if chan_name != "nothing":
data_dict[chan_name] = scope.channels[chan_num-1].measurement.fetch_waveform()
data_queue.put(data_dict)
def fit_worker(data_queue, result_queue, volcal, v_ref, c_ref):
"""Takes data_queue and fits a sinus. Returns 4-tuple of voltage,current, phaseshift and power if raw=False,
else a 6 tuple of amp, freq and phase for both voltage and current.
Returns a 2-tuple if cal=True: internal voltage amplitude, external voltage amplitude.
Use num to restict the amount of data the worker should fetech.
Use cal to Calibration internal/external voltage probe"""
while True:
data_dict = data_queue.get()
voltage_data = data_dict["internal voltage"]
v_amp, v_freq, v_phase = fit_func(voltage_data)
voltage_rms = v_amp/np.sqrt(2) * volcal
current_data = data_dict["current"]
c_amp, c_freq, c_phase = fit_func(current_data)
current_rms = c_amp/np.sqrt(2)/resistance
phaseshift = np.pi/2 + (c_ref - c_phase) - (v_ref - v_phase)
power = voltage_rms * current_rms * np.absolute(np.cos(phaseshift))
result = (voltage_rms, current_rms, phaseshift, power)
result_queue.put(result)
def fit_func(data):
data = np.array(data)
time = np.nan_to_num(data[:,0])
amplitude = np.nan_to_num(data[:,1])
guess_mean = np.mean(amplitude)
    guess_amplitude = np.amax(amplitude)
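    # Minimal completion sketch (assumption: the drive frequency is the global
    # `frequency`, so only amplitude and phase are fitted; callers expect an
    # (amplitude, frequency, phase) tuple). leastsq is imported above.
    optimize_func = lambda p: p[0] * np.sin(2 * np.pi * frequency * time + p[1]) + guess_mean - amplitude
    est_amp, est_phase = leastsq(optimize_func, [guess_amplitude, 0.0])[0]
    return np.abs(est_amp), frequency, est_phase % (2 * np.pi)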
import os
import dlib
import numpy as np
import math
import cv2
from .utils import download_url, extract_file
class Detector:
def __init__(self, predictor_path=None):
self.detector = dlib.get_frontal_face_detector()
self.set_predictor(predictor_path)
def set_predictor(self, predictor_path):
if predictor_path is None:
from .constants import predictor_file
predictor_path = os.path.join(os.path.dirname(__file__), predictor_file)
if not os.path.exists(predictor_path):
from .constants import predictor_url
os.makedirs(os.path.dirname(predictor_path), exist_ok=True)
download_url(predictor_url, save_path=os.path.dirname(predictor_path))
extract_file(predictor_path + '.bz2', os.path.dirname(predictor_path))
self.predictor = dlib.shape_predictor(predictor_path)
def detect_and_crop(self, img, img_size=512):
dets = self.detector(img, 1) # Take a single detection
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
dets = dets[0]
shape = self.predictor(img, dets)
points = shape.parts()
pts = np.array([[p.x, p.y] for p in points])
min_x = np.min(pts[:, 0])
min_y = np.min(pts[:, 1])
max_x = np.max(pts[:, 0])
max_y = np.max(pts[:, 1])
box_width = (max_x - min_x) * 1.2
box_height = (max_y - min_y) * 1.2
        bbox = np.array([min_y - box_height * 0.3, min_x, box_height, box_width]).astype(int)
img_crop = Detector.adjust_box_and_crop(img, bbox, crop_percent=150, img_size=img_size)
# img_crop = img[bbox[0]:bbox[0]+bbox[2], bbox[1]:bbox[1]+bbox[3], :]
return img_crop
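    # Example usage (hypothetical image file; dlib expects RGB while OpenCV
    # loads BGR, hence the channel flip):
    #   det = Detector()
    #   crop = det.detect_and_crop(cv2.imread("face.jpg")[:, :, ::-1], img_size=512)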
@staticmethod
def adjust_box_and_crop(img, bbox, crop_percent=100, img_size=None):
w_ext = math.floor(bbox[2])
h_ext = math.floor(bbox[3])
        bbox_center = np.round(np.array([bbox[0] + 0.5 * bbox[2], bbox[1] + 0.5 * bbox[3]]))
import numpy
import Shadow
import Shadow.ShadowLibExtensions as sd
import sys
import inspect
import os
try:
import matplotlib.pylab as plt
from matplotlib import collections
except ImportError:
print(sys.exc_info()[1])
pass
#TODO: remove ShadowToolsPrivate
import Shadow.ShadowToolsPrivate as stp
from Shadow.ShadowToolsPrivate import Histo1_Ticket as Histo1_Ticket
from Shadow.ShadowToolsPrivate import plotxy_Ticket as plotxy_Ticket
#A2EV = 50676.89919462
codata_h = numpy.array(6.62606957e-34)
codata_ec = numpy.array(1.602176565e-19)
codata_c = numpy.array(299792458.0)
A2EV = 2.0*numpy.pi/(codata_h*codata_c/codata_ec*1e2)
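# A2EV is the wavenumber per unit photon energy (cm^-1 per eV): SHADOW stores
# k = 2*pi/lambda in cm^-1 in column 11, so E[eV] = k / A2EV (see col==10 below).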
#TODO: delete. Implemented for beam object
def getshonecol(beam,col):
'''
Extract a column from a shadow file (eg. begin.dat) or a Shadow.Beam instance.
The column are numbered in the fortran convention, i.e. starting from 1.
It returns a numpy.array filled with the values of the chosen column.
    Inputs:
beam : str instance with the name of the shadow file to be loaded. OR
Shadow.Beam initialized instance.
col : int for the chosen columns.
Outputs:
        numpy.array 1-D with length NPOINT.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 Xp direction or divergence [rads]
5 Yp direction or divergence [rads]
6 Zp direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
    22 the magnitude of the Electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
try: stp.getshonecol_CheckArg(beam,col)
except stp.ArgsError as e: raise e
col=col-1
if isinstance(beam,sd.Beam):
ray = beam.rays
else:
bm = sd.Beam()
bm.load(beam)
ray = bm.rays
if col>=0 and col<18 and col!=10: column = ray[:,col]
if col==10: column = ray[:,col]/A2EV
if col==18: column = 2*numpy.pi*1.0e8/ray[:,10]
if col==19: column = numpy.sqrt(ray[:,0]*ray[:,0]+ray[:,1]*ray[:,1]+ray[:,2]*ray[:,2])
if col==20: column = numpy.arccos(ray[:,4])
if col==21: column = numpy.sqrt(numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8,15,16,17] ]),axis=0))
if col==22: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8,15,16,17] ]),axis=0)
if col==23: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
if col==24: column = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
if col==25: column = ray[:,10]*1.0e8
if col==26: column = ray[:,3]*ray[:,10]*1.0e8
if col==27: column = ray[:,4]*ray[:,10]*1.0e8
if col==28: column = ray[:,5]*ray[:,10]*1.0e8
if col==29:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
column = E2p+E2s
if col==30:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
column = E2p-E2s
if col==31:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
Cos = numpy.cos(ray[:,13]-ray[:,14])
column = 2*E2s*E2p*Cos
if col==32:
E2s = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [6,7,8] ]),axis=0)
E2p = numpy.sum(numpy.array([ ray[:,i]*ray[:,i] for i in [15,16,17] ]),axis=0)
Sin = numpy.sin(ray[:,13]-ray[:,14])
column = 2*E2s*E2p*Sin
return column
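# Example usage (hypothetical SHADOW output file):
#   intensity = getshonecol("begin.dat", 23) # |E|^2 per ray
#   energy_eV = getshonecol("begin.dat", 11)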
#TODO: delete. Implemented for beam object
def getshcol(beam,col):
'''
Extract multiple columns from a shadow file (eg.'begin.dat') or a Shadow.Beam instance.
The column are numbered in the fortran convention, i.e. starting from 1.
It returns a numpy.array filled with the values of the chosen column.
    Inputs:
beam : str instance with the name of the shadow file to be loaded. OR
Shadow.Beam initialized instance.
col : tuple or list instance of int with the number of columns chosen.
Outputs:
        numpy.array 2-D with dimension R x NPOINT, where R is the total number of columns chosen.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
22 the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
try: stp.getshcol_CheckArg(beam,col)
except stp.ArgsError as e: raise e
if isinstance(beam,sd.Beam):
bm = beam
else:
bm = sd.Beam()
bm.load(beam)
ret = []
if isinstance(col, int): return getshonecol(bm,col)
for c in col:
ret.append(getshonecol(bm,c))
return tuple(ret)
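# Usage sketch (assumptions: a SHADOW results file "begin.dat" exists in the
# working directory; column numbers follow the fortran convention above).
# Wrapped in a helper so that importing this module has no side effects.
def _example_getshcol():
    x, z, intens = getshcol("begin.dat", (1, 3, 23))
    print("rays:", len(x), "total intensity:", intens.sum())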
def histo1(beam, col, notitle=0, nofwhm=0, bar=0, **kwargs):
"""
Plot the histogram of a column, as calculated by Shadow.Beam.histo1 using matplotlib
NOTE: This replaces the old histo1, still available as histo1_old
:param beam: a Shadow.Beam() instance, or a file name with Shadow binary file
:param col: the Shadow column number (start from 1)
:param notitle: set to 1 to avoid displaying title
:param nofwhm: set to 1 to avoid labeling FWHM value
:param bar: 1=bar plot, 0=line plot
:param kwargs: keywords accepted by Shadow.Beam.histo1()
:return: the dictionary returned by Shadow.beam.histo1() with some keys added.
"""
title = "histo1"
if isinstance(beam,str):
beam1 = sd.Beam()
beam1.load(beam)
title += " - file: "+beam
beam = beam1
tk2 = beam.histo1(col, **kwargs)
h = tk2["histogram"]
bins = tk2["bin_left"]
xrange = tk2["xrange"]
yrange = [0,1.1*numpy.max(h)]
fwhm = tk2["fwhm"]
xtitle = "column %d"%tk2["col"]
ytitle = "counts ("
if tk2["nolost"] == 0:
ytitle += " all rays"
if tk2["nolost"] == 1:
ytitle += " good rays"
if tk2["nolost"] == 2:
ytitle += " lost rays"
if tk2["ref"] == 0:
ytitle += " = weight: number of rays"
else:
if tk2["ref"] == 23:
ytitle += " - weight: intensity"
else:
ytitle += " - weight column: %d"%(tk2["ref"])
ytitle += ")"
if fwhm != None: print ("fwhm = %g" % fwhm)
fig0 = plt.figure()
ax = fig0.add_subplot(111)
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
if notitle != 1: ax.set_title(title)
ax.set_xlim(xrange[0],xrange[1])
ax.set_ylim(yrange[0],yrange[1])
ax.grid(True)
if bar:
l = ax.bar(bins, h, 1.0*(bins[1]-bins[0]),color='blue') #,error_kw=dict(elinewidth=2,ecolor='red'))
else:
l = plt.plot(tk2["bin_path"], tk2["histogram_path"], color='blue') #,error_kw=dict(elinewidth=2,ecolor='red'))
if tk2["fwhm"] != None:
hh = 0.5*numpy.max(tk2["histogram"])
lines = [ [ (tk2["fwhm_coordinates"][0],hh), \
(tk2["fwhm_coordinates"][1],hh) ]]
lc = collections.LineCollection(lines,color='red',linewidths=2)
ax.add_collection(lc)
if nofwhm != 1:
if tk2["fwhm_coordinates"][0] < 0:
shift1 = 0.9
else:
shift1 = 1.0
ax.annotate('FWHM=%f'%tk2["fwhm"], xy=(shift1*tk2["fwhm_coordinates"][0],1.01*hh))
plt.show()
return tk2
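# Usage sketch for histo1 (assumptions: "star.01" is a SHADOW results file and
# the keywords nbins/nolost/ref are accepted by Shadow.Beam.histo1()).
def _example_histo1():
    tk = histo1("star.01", 1, nbins=101, nolost=1, ref=23)
    print("fwhm:", tk["fwhm"])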
#TODO: delete. Reimplemented using Shadow.beam.histo1()
def histo1_old(beam,col,xrange=None,yrange=None,nbins=50,nolost=0,ref=0,write=0,title='HISTO1',xtitle=None,ytitle=None,calfwhm=0,noplot=0):
'''
Plot the histogram of a column, simply counting the rays, or weighting with the intensity.
It returns a ShadowTools.Histo1_Ticket which contains the histogram data, and the figure.
Inputs:
beam : str instance with the name of the shadow file to be loaded, or a Shadow.Beam initialized instance.
col : int for the chosen column.
Optional Inputs:
xrange : tuple or list of length 2 describing the interval of interest for x, the data read from the chosen column.
yrange : tuple or list of length 2 describing the interval of interest for y, counts or intensity depending on ref.
nbins : number of bins of the histogram.
nolost :
0 All rays
1 Only good rays
2 Only lost rays
ref :
0, None, "no", "NO" or "No": only count the rays
23, "Yes", "YES" or "yes": weight with intensity (look at col=23 |E|^2 total intensity)
other value: use that column as weight
write :
0 don't write any file
1 write the histogram into the file 'HISTO1'.
title : title of the figure, it will appear on top of the window.
xtitle : label for the x axis.
ytitle : label for the y axis.
calfwhm :
0 don't compute the fwhm
1 compute the fwhm
noplot :
0 plot the histogram
1 don't plot the histogram
orientation :
'vertical' x axis for data, y for intensity
'horizontal' y axis for data, x for intensity
plotxy :
0 standalone version
1 to use within plotxy
Outputs:
ShadowTools.Histo1_Ticket instance.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
22 the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
try: stp.Histo1_CheckArg(beam,col,xrange,yrange,nbins,nolost,ref,write,title,xtitle,ytitle,calfwhm,noplot)
except stp.ArgsError as e: raise e
col=col-1
#plot_nicc.ioff()
plt.ioff()
figure = plt.figure()
axHist = figure.add_axes([0.1,0.1,0.8,0.8])
ytitlesave = ytitle
if ref in (None, "No", "NO", "no"): ref = 0
if ref in ("Yes", "YES", "yes"): ref = 23
if ref == 1:
print("Shadow.ShadowTools.histo1_old: Warning: weighting with column 1 (X) [not with intensity as may happen in old versions]")
if ref==0:
x, a = getshcol(beam,(col+1,10))
w = numpy.ones(len(x))
else:
x, a, w = getshcol(beam,(col+1,10,ref))
if nolost==0:
t = numpy.where(a!=-3299)
ytitle = 'All rays'
if nolost==1:
t = numpy.where(a==1.0)
ytitle = 'Good rays'
if nolost==2:
t = numpy.where(a!=1.0)
ytitle = 'Lost rays'
if len(t[0])==0:
print ("no rays match the selection, the histogram will not be plotted")
return
if ref==0:
ytitle = 'counts ' + ytitle
h,bins,patches = axHist.hist(x[t],bins=nbins,range=xrange,histtype='step',alpha=0.5)
if yrange==None: yrange = [0.0, numpy.max(h)]
hw=h
else: # any other ref value weights with that column, as documented above
ytitle = (stp.getLabel(ref-1))[0] + ' ' + ytitle
h,bins = numpy.histogram(x[t],range=xrange,bins=nbins)
hw,bins,patches = axHist.hist(x[t],range=xrange, bins=nbins,histtype='step',alpha=0.5,weights=w[t])
if yrange==None: yrange = [0.0, numpy.max(hw)]
fwhm = None
if calfwhm==1:
fwhm, tf, ti = stp.calcFWHM(hw,bins[1]-bins[0])
axHist.plot([bins[ti],bins[tf+1]],[max(h)*0.5,max(h)*0.5],'x-')
print ("fwhm = %g" % fwhm)
if write==1: stp.Histo1_write(title,bins,h,hw,col,beam,ref-1)
if xtitle==None: xtitle=(stp.getLabel(col))[0]
axHist.set_xlabel(xtitle)
if ytitlesave!=None:
axHist.set_ylabel(ytitlesave)
else:
axHist.set_ylabel(ytitle)
if title!=None: axHist.set_title(title)
if xrange!=None: axHist.set_xlim(xrange)
if yrange!=None: axHist.set_ylim(yrange)
if noplot==0:
plt.show()
ticket = Histo1_Ticket()
ticket.histogram = hw
ticket.bin_center = bins[:-1]+(bins[1]-bins[0])*0.5
ticket.bin_left = bins[:-1]
ticket.figure = figure
ticket.xrange = xrange
ticket.yrange = yrange
ticket.xtitle = xtitle
ticket.ytitle = ytitle
ticket.title = title
ticket.fwhm = fwhm
ticket.intensity = w[t].sum()
return ticket
def plotxy_gnuplot(beam,col_h,col_v,execute=1,ps=0,pdf=0,title="",viewer='okular',**kwargs):
"""
A plotxy implemented for gnuplot.
It uses Shadow.beam.histo2() for calculations.
It creates files for gnuplot (plotxy.gpl and plotxy_*.dat)
It can run gnuplot (system call) and display ps or pdf outputs
:param beam: it can be a SHADOW binary file, an instance of Shadow.Beam() or a dictionary from Shadow.Beam.histo2
:param col_h: the H column for the plot. Irrelevant if beam is a dictionary
:param col_v: the V column for the plot. Irrelevant if beam is a dictionary
:param execute: set to 1 to make a system call to execute gnuplot (default=1)
:param ps: set to 1 to get postscript output (irrelevant if pdf=1)
:param pdf: set to 1 for pdf output (takes priority over ps)
:param viewer: set to the ps or pdf viewer (default='okular')
:param kwargs: keywords to be passed to Shadow.beam.histo2()
:return: the dictionary produced by Shadow.beam.histo2 with some keys added
"""
if title == "":
title = "plotxy"
if isinstance(beam,dict):
tkt = beam
col_h = tkt["col_h"]
col_v = tkt["col_v"]
else:
if isinstance(beam,str):
beam1 = sd.Beam()
beam1.load(beam)
title += " - file: "+beam
beam = beam1
tkt = beam.histo2(col_h,col_v,**kwargs)
f = open("plotxy_histtop.dat",'w')
for i in range(tkt["nbins_h"]):
f.write("%12.5f %12.5f \n"%( tkt["bin_h_left"][i], tkt["histogram_h"][i] ))
f.write("%12.5f %12.5f \n"%( tkt["bin_h_right"][i], tkt["histogram_h"][i] ))
f.close()
print("File written to disk: plotxy_histside.dat")
f = open("plotxy_histside.dat",'w')
for i in range(tkt["nbins_v"]):
f.write("%12.5f %12.5f \n"%( tkt["histogram_v"][i], tkt["bin_v_left"][i] ))
f.write("%12.5f %12.5f \n"%( tkt["histogram_v"][i], tkt["bin_v_right"][i] ))
f.close()
print("File written to disk: plotxy_histtop.dat")
f = open("plotxy_grid.dat",'w')
f.write(" # plotxy grid data for plotxy.gpl\n")
f.write(" # Xbin Ybin Weight\n")
for i in range(tkt["nbins_h"]):
for j in range(tkt["nbins_v"]):
f.write("%25.20f %25.20f %25.20f\n"%(tkt["bin_h_center"][i],tkt["bin_v_center"][j], tkt["histogram"][i,j] ))
f.write("\n")
f.close()
print("File written to disk: plotxy_grid.dat")
txt = """
#GnuPlot command file for PLOTXY
#Minimum version: gnuplot 4.2 patchlevel 6
#
{set_terminal}
set multiplot
#
# top histogram
#
set lmargin screen 0.2125
set rmargin screen 0.70
set bmargin screen 0.75
set tmargin screen 0.90
unset xtics
unset x2tics
unset ytics
unset y2tics
unset key
unset xlabel
unset ylabel
unset x2label
unset y2label
set x2tics mirror
set x2label " {title} "
set xrange[ {xrange[0]} : {xrange[1]} ]
set yrange[*:*]
plot "plotxy_histtop.dat" u 1:2 w lines lt -1 notitle
#
# side histogram
#
set lmargin screen 0.10
set rmargin screen 0.2125
set bmargin screen 0.10
set tmargin screen 0.75
unset xtics
unset x2tics
unset ytics
unset y2tics
unset key
unset xlabel
unset ylabel
unset x2label
unset y2label
set ytics
set ylabel "Column {col_v}"
set xrange[*:*]
set yrange[ {yrange[0]} : {yrange[1]} ]
plot "plotxy_histside.dat" u (-$1):2 w lines lt -1 notitle
#
# scattered/contour plot
#
set lmargin screen 0.2125
set rmargin screen 0.70
set bmargin screen 0.10
set tmargin screen 0.75
unset xtics
unset x2tics
unset ytics
unset y2tics
unset key
unset xlabel
unset ylabel
unset x2label
unset y2label
set xlabel "Column {col_h}"
set xrange[ {xrange[0]} : {xrange[1]} ]
set yrange[ {yrange[0]} : {yrange[1]} ]
#
# IF PIXEL UNCOMMENT THIS
#
set pm3d map
set palette gray
splot "./plotxy_grid.dat" u 1:2:3 notitle
#
# info column
#
set obj 10 rect from graph 1.20, graph 1 to graph 1.61, graph 0
set label "{label_id}" at graph 1.21, graph 0.9
set label "{label_good}" at graph 1.21, graph 0.5
set label "TOT = {nrays}" at graph 1.21, graph 0.30
set label "LOST = {lost_rays}" at graph 1.21, graph 0.25
set label "GOOD = {good_rays}" at graph 1.21, graph 0.20
set label "INTENS = {intensity}" at graph 1.21, graph 0.15
set label "{label_weight}" at graph 1.21, graph 0.10
replot
unset multiplot
{set_pause}
"""
#add kws to the dictionary to be used in the template
tkt["set_terminal"] = "set terminal x11 size 900,600"
tkt["set_pause"] = "pause -1 'Press <Enter> to end graphic '"
if ps:
tkt["set_terminal"] = "set terminal postscript \n set output 'plotxy.ps' "
tkt["set_pause"] = ""
if pdf:
tkt["set_terminal"] = "set terminal pdf \n set output 'plotxy.pdf' "
tkt["set_pause"] = ""
tkt["title"] = title
tkt["lost_rays"] = tkt["nrays"] - tkt["good_rays"]
tkt["label_id"] = ""
if os.getenv("USER") is None:
pass
else:
tkt["label_id"] += os.getenv("USER")
if os.getenv("HOST") is None:
pass
else:
tkt["label_id"] += "@"+os.getenv("HOST")
if tkt["ref"] == 0:
tkt["label_weight"] = "WEIGHT: RAYS"
else:
if tkt["ref"] == 1 or tkt["ref"] == 23:
tkt["label_weight"] = "WEIGHT: INTENSITY"
else:
tkt["label_weight"] = "WEIGHT: COLUMN %d"%(tkt["ref"])
if tkt["nolost"] == 0:
tkt["label_good"] = "--ALL RAYS"
elif tkt["nolost"] == 1:
tkt["label_good"] = "--GOOD ONLY"
else:
tkt["label_good"] = "--ONLY LOSSES"
txt2 = txt.format_map(tkt)
f = open("plotxy.gpl",'w')
f.write(txt2)
f.close()
print("File written to disk: plotxy.gpl")
if execute:
os.system("gnuplot plotxy.gpl")
if ps:
os.system(viewer+" plotxy.ps")
if pdf:
os.system(viewer+" plotxy.pdf")
return tkt
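# Usage sketch (assumptions: "star.01" is a SHADOW results file and gnuplot is
# available on the PATH). With execute=0 only plotxy.gpl and the plotxy_*.dat
# data files are written; no system call to gnuplot is made.
def _example_plotxy_gnuplot():
    tkt = plotxy_gnuplot("star.01", 1, 3, execute=0)
    print("good rays:", tkt["good_rays"])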
def plotxy(beam,col_h,col_v, nofwhm=1, title="", **kwargs):
"""
plotxy implementation using matplotlib.
Calculations are done using Shadow.beam.histo2()
:param beam: it can be a SHADOW binary file, an instance of Shadow.Beam() or a dictionary from Shadow.Beam.histo2
:param col_h: the column for the H coordinate in the plot (irrelevant if beam is a dictionary)
:param col_v: the column for the V coordinate in the plot (irrelevant if beam is a dictionary)
:param nofwhm: set to 0 to label the FWHM value in the plot (default: do not label)
:param kwargs: keywords passed to Shadow.Beam.histo2
:return: the dictionary returned by Shadow.beam.histo2() with some added keys.
"""
if title == "":
title = "plotxy"
if isinstance(beam,dict):
tkt = beam
col_h = tkt["col_h"]
col_v = tkt["col_v"]
else:
if isinstance(beam,str):
beam1 = sd.Beam()
beam1.load(beam)
title += " - file: "+beam
beam = beam1
tkt = beam.histo2(col_h,col_v,**kwargs)
xtitle = "Column %d"%tkt["col_h"]
ytitle = "Column %d"%tkt["col_v"]
figure = plt.figure(figsize=(12,8),dpi=96)
ratio = 8.0/12.0
rect_scatter = [0.10*ratio, 0.10, 0.65*ratio, 0.65]
rect_histx = [0.10*ratio, 0.77, 0.65*ratio, 0.20]
rect_histy = [0.77*ratio, 0.10, 0.20*ratio, 0.65]
rect_text = [1.00*ratio, 0.10, 1.20*ratio, 0.65]
#
#main plot
#
axScatter = figure.add_axes(rect_scatter)
axScatter.set_xlabel(xtitle)
axScatter.set_ylabel(ytitle)
# axScatter.set_xlim(tkt["xrange"])
# axScatter.set_ylim(tkt["yrange"])
axScatter.axis(xmin=tkt["xrange"][0],xmax=tkt["xrange"][1])
axScatter.axis(ymin=tkt["yrange"][0],ymax=tkt["yrange"][1])
#axScatter.pcolor(tkt["bin_h_edges"], tkt["bin_v_edges"], tkt["histogram"].T)
axScatter.pcolormesh(tkt["bin_h_edges"], tkt["bin_v_edges"], tkt["histogram"].T)
for tt in axScatter.get_xticklabels():
tt.set_size('x-small')
for tt in axScatter.get_yticklabels():
tt.set_size('x-small')
#
#histograms
#
axHistx = figure.add_axes(rect_histx, sharex=axScatter)
axHisty = figure.add_axes(rect_histy, sharey=axScatter)
#for practical purposes, writes the full histogram path
tmp_h_b = []
tmp_h_h = []
for s,t,v in zip(tkt["bin_h_left"],tkt["bin_h_right"],tkt["histogram_h"]):
tmp_h_b.append(s)
tmp_h_h.append(v)
tmp_h_b.append(t)
tmp_h_h.append(v)
tmp_v_b = []
tmp_v_h = []
for s,t,v in zip(tkt["bin_v_left"],tkt["bin_v_right"],tkt["histogram_v"]):
tmp_v_b.append(s)
tmp_v_h.append(v)
tmp_v_b.append(t)
tmp_v_h.append(v)
axHistx.plot(tmp_h_b,tmp_h_h)
axHisty.plot(tmp_v_h,tmp_v_b)
for tl in axHistx.get_xticklabels(): tl.set_visible(False)
for tl in axHisty.get_yticklabels(): tl.set_visible(False)
for tt in axHisty.get_xticklabels():
tt.set_rotation(270)
tt.set_size('x-small')
for tt in axHistx.get_yticklabels():
tt.set_size('x-small')
if tkt["fwhm_h"] != None:
hh = 0.5*numpy.max(tkt["histogram_h"])
lines = [ [ (tkt["fwhm_coordinates_h"][0],hh), \
(tkt["fwhm_coordinates_h"][1],hh) ]]
lc = collections.LineCollection(lines,color='red',linewidths=2)
axHistx.add_collection(lc)
if nofwhm != 1:
if tkt["fwhm_coordinates_h"][0] < 0:
shift1 = 0.9
else:
shift1 = 1.0
axHistx.annotate('FWHM=%f'%tkt["fwhm_h"], xy=(shift1*tkt["fwhm_coordinates_h"][0],1.01*hh))
if tkt["fwhm_v"] != None:
hh = 0.5*numpy.max(tkt["histogram_v"])
lines = [ [ (hh,tkt["fwhm_coordinates_v"][0]), \
(hh,tkt["fwhm_coordinates_v"][1]) ]]
lc = collections.LineCollection(lines,color='green',linewidths=2)
axHisty.add_collection(lc)
if nofwhm != 1:
if tkt["fwhm_coordinates_v"][0] < 0:
shift1 = 0.9
else:
shift1 = 1.0
axHisty.annotate('FWHM=%f'%tkt["fwhm_v"], xy=(1.01*hh,shift1*tkt["fwhm_coordinates_v"][0]))  # x/y swapped for the side histogram
if title!=None:
axHistx.set_title(title)
axText = figure.add_axes(rect_text)
if tkt["nolost"] == 0: axText.text(0.0,0.8,"ALL RAYS")
if tkt["nolost"] == 1: axText.text(0.0,0.8,"GOOD RAYS")
if tkt["nolost"] == 2: axText.text(0.0,0.8,"LOST RAYS")
#tmps = "intensity: %f"%(tkt["intensity"])
axText.text(0.0,0.7,"intensity: %8.2f"%(tkt["intensity"]))
axText.text(0.0,0.6,"total number of rays: "+str(tkt["nrays"]))
axText.text(0.0,0.5,"total good rays: "+str(tkt["good_rays"]))
axText.text(0.0,0.4,"total lost rays: "+str(tkt["nrays"]-tkt["good_rays"]))
if tkt["fwhm_h"] != None:
axText.text(0.0,0.3,"fwhm H: "+str(tkt["fwhm_h"]))
if tkt["fwhm_v"] != None:
axText.text(0.0,0.2,"fwhm V: "+str(tkt["fwhm_v"]))
if isinstance(beam,str): axText.text(0.0,0.1,"FILE: "+beam)
if isinstance(beam,sd.Beam): axText.text(0.0,0.1,"from Shadow.Beam instance")
if tkt["ref"] == 0:
axText.text(0.0,0.0,"WEIGHT: RAYS")
else:
axText.text(0.0,0.0,"WEIGHT: INTENSITY")
axText.set_axis_off()
plt.show()
return tkt
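# Usage sketch for plotxy (assumptions: "star.01" is a SHADOW results file and
# the keywords nbins/nolost are accepted by Shadow.Beam.histo2()). Columns 1
# and 3 give the X-Z footprint.
def _example_plotxy():
    tkt = plotxy("star.01", 1, 3, nbins=101, nolost=1)
    print("intensity:", tkt["intensity"])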
#TODO: delete. Reimplemented using Shadow.Beam.histo2()
def plotxy_old(beam,cols1,cols2,nbins=25,nbins_h=None,level=5,xrange=None,yrange=None,nolost=0,title='PLOTXY',xtitle=None,ytitle=None,noplot=0,calfwhm=0,contour=0):
'''
Draw the scatter or contour or pixel-like plot of two columns of a Shadow.Beam instance or of a given shadow file, along with histograms for the intensity on the top and right side.
Inputs:
beam : str instance with the name of the shadow file to be loaded, or a Shadow.Beam initialized instance.
cols1 : first column.
cols2 : second column.
Optional Inputs:
nbins : int for the size of the grid (nbins x nbins). It will affect the plot only if non scatter.
nbins_h : int for the number of bins for the histograms
level : int number of level to be drawn. It will affect the plot only if contour.
xrange : tuple or list of length 2 describing the interval of interest for x, the data read from the first chosen column.
yrange : tuple or list of length 2 describing the interval of interest for y, the data read from the second chosen column.
nolost :
0 All rays
1 Only good rays
2 Only lost rays
title : title of the figure, it will appear on top of the window.
xtitle : label for the x axis.
ytitle : label for the y axis.
noplot :
0 plot the histogram
1 don't plot the histogram
calfwhm :
0 don't compute the fwhm
1 compute the fwhm and draw it
2 in addition to calfwhm=1, it computes now the intensity in a
slit of FWHM_h x FWHM_v
contour :
0 scatter plot
1 contour, black & white, only counts (without intensity)
2 contour, black & white, with intensity.
3 contour, colored, only counts (without intensity)
4 contour, colored, with intensity.
5 pixelized, colored, only counts (without intensity)
6 pixelized, colored, with intensity.
Outputs:
ShadowTools.Histo1_Ticket instance.
Error:
if an error occurs an ArgsError is raised.
Possible choice for col are:
1 X spatial coordinate [user's unit]
2 Y spatial coordinate [user's unit]
3 Z spatial coordinate [user's unit]
4 X' direction or divergence [rads]
5 Y' direction or divergence [rads]
6 Z' direction or divergence [rads]
7 X component of the electromagnetic vector (s-polariz)
8 Y component of the electromagnetic vector (s-polariz)
9 Z component of the electromagnetic vector (s-polariz)
10 Lost ray flag
11 Energy [eV]
12 Ray index
13 Optical path length
14 Phase (s-polarization)
15 Phase (p-polarization)
16 X component of the electromagnetic vector (p-polariz)
17 Y component of the electromagnetic vector (p-polariz)
18 Z component of the electromagnetic vector (p-polariz)
19 Wavelength [A]
20 R= SQRT(X^2+Y^2+Z^2)
21 angle from Y axis
22 the magnitude of the electromagnetic vector
23 |E|^2 (total intensity)
24 total intensity for s-polarization
25 total intensity for p-polarization
26 K = 2 pi / lambda [A^-1]
27 K = 2 pi / lambda * col4 [A^-1]
28 K = 2 pi / lambda * col5 [A^-1]
29 K = 2 pi / lambda * col6 [A^-1]
30 S0-stokes = |Es|^2 + |Ep|^2
31 S1-stokes = |Es|^2 - |Ep|^2
32 S2-stokes = 2 |Es| |Ep| cos(phase_s-phase_p)
33 S3-stokes = 2 |Es| |Ep| sin(phase_s-phase_p)
'''
if nbins_h==None: nbins_h=nbins+1
try:
stp.plotxy_CheckArg(beam,cols1,cols2,nbins,nbins_h,level,xrange,yrange,nolost,title,xtitle,ytitle,noplot,calfwhm,contour)
except stp.ArgsError as e:
raise e
#plot_nicc.ioff()
plt.ioff()
col1,col2,col3,col4 = getshcol(beam,(cols1,cols2,10,23,))
nbins=nbins+1
if xtitle==None: xtitle=(stp.getLabel(cols1-1))[0]
if ytitle==None: ytitle=(stp.getLabel(cols2-1))[0]
if nolost==0: t = numpy.where(col3!=-3299)
if nolost==1: t = numpy.where(col3==1.0)
if nolost==2: t = numpy.where(col3!=1.0)
if xrange==None: xrange = stp.setGoodRange(col1[t])
if yrange==None: yrange = stp.setGoodRange(col2[t])
#print xrange
#print yrange
tx = numpy.where((col1>xrange[0])&(col1<xrange[1]))
ty = numpy.where((col2>yrange[0])&(col2<yrange[1]))
tf = set(list(t[0])) & set(list(tx[0])) & set(list(ty[0]))
t = (numpy.array(sorted(list(tf))),)
if len(t[0])==0:
print ("no point selected")
return None
#figure = pylab.plt.figure(figsize=(12,8),dpi=96)
figure = plt.figure(figsize=(12,8),dpi=96)
ratio = 8.0/12.0
left, width = 0.1*ratio, 0.65*ratio
bottom, height = 0.1, 0.65
bottom_h = bottom+height+0.02
left_h = left+width+0.02*ratio
rect_scatter = [0.10*ratio, 0.10, 0.65*ratio, 0.65]
rect_histx = [0.10*ratio, 0.77, 0.65*ratio, 0.20]
rect_histy = [0.77*ratio, 0.10, 0.20*ratio, 0.65]
rect_text = [1.00*ratio, 0.10, 1.20*ratio, 0.65]
axScatter = figure.add_axes(rect_scatter)
axScatter.set_xlabel(xtitle)
axScatter.set_ylabel(ytitle)
if contour==0:
axScatter.scatter(col1[t],col2[t],s=0.5)
if contour>0 and contour<7:
if contour==1 or contour==3 or contour==5: w = numpy.ones( len(col1) )
if contour==2 or contour==4 or contour==6: w = col4
grid = numpy.zeros(nbins*nbins).reshape(nbins,nbins)
for i in t[0]:
indX = stp.findIndex(col1[i],nbins,xrange[0],xrange[1])
indY = stp.findIndex(col2[i],nbins,yrange[0],yrange[1])
try:
grid[indX][indY] = grid[indX][indY] + w[i]
except IndexError:
pass
X, Y = numpy.mgrid[xrange[0]:xrange[1]:nbins*1.0j,yrange[0]:yrange[1]:nbins*1.0j]
L = numpy.linspace(numpy.amin(grid),numpy.amax(grid),level)
if contour==1 or contour==2: axScatter.contour(X, Y, grid, colors='k', levels=L)
if contour==3 or contour==4: axScatter.contour(X, Y, grid, levels=L)
if contour==5 or contour==6: axScatter.pcolor(X, Y, grid)
#axScatter.set_xlim(xrange)
#axScatter.set_ylim(yrange)
#axScatter.axis(xmin=xrange[0],xmax=xrange[1])
#axScatter.axis(ymin=yrange[0],ymax=yrange[1])
for tt in axScatter.get_xticklabels():
tt.set_size('x-small')
for tt in axScatter.get_yticklabels():
tt.set_size('x-small')
#if ref==0: col4 = numpy.ones(len(col4),dtype=float)
axHistx = figure.add_axes(rect_histx, sharex=axScatter)
axHisty = figure.add_axes(rect_histy, sharey=axScatter)
binx = numpy.linspace(xrange[0],xrange[1],nbins_h)
biny = numpy.linspace(yrange[0],yrange[1],nbins_h)
if contour==0 or contour==1 or contour==3 or contour==5:
hx, binx, patchx = axHistx.hist(col1[t],bins=binx,range=xrange,histtype='step',color='k')
hy, biny, patchy = axHisty.hist(col2[t],bins=biny,range=yrange,orientation='horizontal',histtype='step',color='k')
if contour==2 or contour==4 or contour==6:
hx, binx, patchx = axHistx.hist(col1[t],bins=binx,range=xrange,weights=col4[t],histtype='step',color='b')
hy, biny, patchy = axHisty.hist(col2[t],bins=biny,range=yrange,weights=col4[t],orientation='horizontal',histtype='step',color='b')
for tl in axHistx.get_xticklabels(): tl.set_visible(False)
for tl in axHisty.get_yticklabels(): tl.set_visible(False)
for tt in axHisty.get_xticklabels():
tt.set_rotation(270)
tt.set_size('x-small')
for tt in axHistx.get_yticklabels():
tt.set_size('x-small')
intensityinslit = 0.0
if calfwhm>=1:
fwhmx,txf, txi = stp.calcFWHM(hx,binx[1]-binx[0])
fwhmy,tyf, tyi = stp.calcFWHM(hy,biny[1]-biny[0])
axHistx.plot([binx[txi],binx[txf+1]],[max(hx)*0.5,max(hx)*0.5],'x-')
axHisty.plot([max(hy)*0.5,max(hy)*0.5],[biny[tyi],biny[tyf+1]],'x-')
print ("fwhm horizontal: %g" % fwhmx)
print ("fwhm vertical: %g" % fwhmy)
if calfwhm>=2:
xx1 = binx[txi]
xx2 = binx[txf+1]
yy1 = biny[tyi]
yy2 = biny[tyf+1]
print ("limits horizontal: %g %g " % (binx[txi],binx[txf+1]))
print ("limits vertical: %g %g " % (biny[tyi],biny[tyf+1]))
axScatter.plot([xx1,xx2,xx2,xx1,xx1],[yy1,yy1,yy2,yy2,yy1])
#fwhmx,txf, txi = stp.calcFWHM(hx,binx[1]-binx[0])
#fwhmy,tyf, tyi = stp.calcFWHM(hy,biny[1]-biny[0])
#calculate intensity in slit
if nolost==0: tt = numpy.where(col3!=-3299)
if nolost==1: tt = numpy.where(col3==1.0)
if nolost==2: tt = numpy.where(col3!=1.0)
ttx = numpy.where((col1>=xx1)&(col1<=xx2))
tty = numpy.where((col2>=yy1)&(col2<=yy2))
ttf = set(list(tt[0])) & set(list(ttx[0])) & set(list(tty[0]))
tt = (numpy.array(sorted(list(ttf))),)
if len(tt[0])>0:
intensityinslit = col4[tt].sum()
print ("Intensity in slit: %g ",intensityinslit)
if title!=None:
axHistx.set_title(title)
axText = figure.add_axes(rect_text)
ntot = len(numpy.where(col3!=-3299)[0])
ngood = len(numpy.where(col3==1)[0])
nbad = ntot - ngood
if nolost==0: axText.text(0.0,0.8,"ALL RAYS")
if nolost==1: axText.text(0.0,0.8,"GOOD RAYS")
if nolost==2: axText.text(0.0,0.8,"LOST RAYS")
tmps = "intensity: "+str(col4[t].sum())
if calfwhm == 2:
tmps=tmps+" (in slit:"+str(intensityinslit)+") "
axText.text(0.0,0.7,tmps)
axText.text(0.0,0.6,"total number of rays: "+str(ntot))
axText.text(0.0,0.5,"total good rays: "+str(ngood))
axText.text(0.0,0.4,"total lost rays: "+str(ntot-ngood))
if calfwhm>=1:
axText.text(0.0,0.3,"fwhm H: "+str(fwhmx))
axText.text(0.0,0.2,"fwhm V: "+str(fwhmy))
if isinstance(beam,str): axText.text(0.0,0.1,"FILE: "+beam)
if isinstance(beam,sd.Beam): axText.text(0.0,0.1,"from Shadow3 Beam instance")
axText.text(0.0,0.0,"DIR: "+os.getcwd())
axText.set_axis_off()
#pylab.plt.draw()
plt.draw()
if noplot==0: figure.show()
ticket = plotxy_Ticket()
ticket.figure = figure
ticket.xrange = xrange
ticket.yrange = yrange
ticket.xtitle = xtitle
ticket.ytitle = ytitle
ticket.title = title
if calfwhm>=1:
ticket.fwhmx = fwhmx
ticket.fwhmy = fwhmy
ticket.intensity = col4[t].sum()
ticket.averagex = numpy.average( col1[t] )
ticket.averagey = numpy.average( col2[t] )
ticket.intensityinslit = intensityinslit
return ticket
#
#focnew
#
def focnew(beam,nolost=1,mode=0,center=[0.0,0.0]):
"""
Implements SHADOW's focnew utility
For scanning the RMS around the focal position, use focnew_scan with focnew results
:param beam: a file name or an instance of Shadow.Beam
:param nolost: 0=all rays, 1=good only, 2=lost only
:param mode: 0=center at origin, 1=center at barycenter, 2=external center (given via the center parameter)
:param center: [x0,y0] the center coordinates, if mode=2
:return: a python dictionary (ticket) with:
ticket['nolost'] # input flag
ticket['mode'] # input flag
ticket['center_at'] # text of mode: 'Origin','Barycenter' or 'External'
ticket['AX'] # \
ticket['AZ'] # focnew coefficients (to be used by focnew_scan)
ticket['AT'] # /
ticket['x_waist'] # position of waist X
ticket['z_waist'] # position of waist Z
ticket['t_waist'] # position of waist T (averaged)
ticket['text'] = txt # a text with focnew info
"""
NMODE = ['Origin','Barycenter','External']
if isinstance(beam,str):
beam1 = Shadow.Beam()
beam1.load(beam)
else:
beam1 = beam
# get focnew coefficients
ray = numpy.array(beam1.getshcol([1,2,3,4,5,6],nolost=nolost))
#ray = numpy.array(self.getshcol([1,2,3,4,5,6],nolost=nolost))
if mode == 2:
    ray[0,:] -= center[0]  # ray has shape (6, nrays): row 0 is X, row 2 is Z
    ray[2,:] -= center[1]
AX,AZ,AT = _focnew_coeffs(ray,nolost=nolost,mode=mode,center=center)
# store versors
ZBAR = AZ[3]
VZBAR = AZ[5]
#
XBAR = AX[3]
VXBAR = AX[5]
#
TBAR = ZBAR + XBAR
VTBAR = VZBAR + VXBAR
#reset coeffs
if mode != 1:
AZ[3] = 0.0
AZ[4] = 0.0
AZ[5] = 0.0
AX[3] = 0.0
AX[4] = 0.0
AX[5] = 0.0
AT[3] = 0.0
AT[4] = 0.0
AT[5] = 0.0
#get Y coordinate of the three waists
if numpy.abs(AZ[0]-AZ[5]) > 1e-30:
TPARZ = (AZ[4] - AZ[1]) / (AZ[0] - AZ[5])
else:
TPARZ = 0.0
if numpy.abs(AX[0]-AX[5]) > 1e-30:
TPARX = (AX[4] - AX[1]) / (AX[0] - AX[5])
else:
TPARX = 0.0
if numpy.abs(AT[0]-AT[5]) > 1e-30:
TPART = (AT[4] - AT[1]) / (AT[0] - AT[5])
else:
TPART = 0.0
#prepare text output
txt = ""
txt += '-----------------------------------------------------------------------------\n'
txt += 'Center at : %s\n'%(NMODE[mode])
txt += 'X = %f Z = %f\n'%(center[0],center[1])
txt += '-----------------------------------------------------------------------------\n'
SIGX = numpy.sqrt(numpy.abs( AX[0] * TPARX**2 + 2.0 * AX[1] * TPARX + AX[2] - ( AX[3] + 2.0 * AX[4] * TPARX + AX[5] * TPARX**2)))
SIGZ = numpy.sqrt(numpy.abs( AZ[0] * TPARZ**2 + 2.0 * AZ[1] * TPARZ + AZ[2] - ( AZ[3] + 2.0 * AZ[4] * TPARZ + AZ[5] * TPARZ**2)))
SIGT = numpy.sqrt(numpy.abs( AT[0] * TPART**2 + 2.0 * AT[1] * TPART + AT[2] - ( AT[3] + 2.0 * AT[4] * TPART + AT[5] * TPART**2)))
SIGX0 = numpy.sqrt(numpy.abs(AX[2] - AX[3]))
SIGZ0 = numpy.sqrt(numpy.abs(AZ[2] - AZ[3]))
SIGT0 = numpy.sqrt(numpy.abs(AT[2] - AT[3]))
# txt += '............. S A G I T T A L ............\n'
txt += '............. X AXIS (column 1) ............\n'
txt += 'X coefficients : %g %g %g\n'%(AX[0],AX[1],AX[2])
txt += 'Center : %g Average versor : %g\n'%(numpy.sqrt(numpy.abs(XBAR)),numpy.sqrt(numpy.abs(VXBAR)))
txt += 'Focus along X at : %g\n'%(TPARX)
txt += 'Waist size at best focus (rms) : %g\n'%(SIGX)
txt += 'Waist size at origin : %g\n'%(SIGX0)
# txt += '............. T A N G E N T I A L .............\n'
txt += '............. Z AXIS (column 3) ............\n'
txt += 'Z coefficients : %g %g %g\n'%(AZ[0],AZ[1],AZ[2])
txt += 'Center : %g Average versor : %g\n'%(numpy.sqrt(numpy.abs(ZBAR)),numpy.sqrt(numpy.abs(VZBAR)))
from numpy import pi
import numpy as np
import math
#from sympy import Matrix
import pylab
#import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#from scipy.interpolate import Rbf
import pickle
from scipy.sparse import csr_matrix
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
#Data
#Here the forcing diffusion coefficient, forcing terms, initial conditions, Dirichlet and Neumann conditions of the
#problem:
#Find E,B such that
#B_t=-curl E
#E=-nu curl B
def DiffusionCoeff(x,y):
#This is the diffusion coefficient
#this function is scalar valued
return 1
def EssentialBoundaryCond(x,y,t):
#These are the essential boundary conditions
#return math.exp(t+x)-math.exp(t+y) #Solution#1 does not include J cross B term
#return 1 #Solution #2 does not include J cross B term but is linear
#return -math.exp(y+t) #Solution #3 it includes J cross B term
#return math.exp(t+x)-math.exp(t+y)#+math.exp(t) #Solution 4 includes J cross B term
#return 20*math.exp(t+x)-20*math.exp(t+y)-x*y*math.exp(t) #Solution 4 Includes J cross B term
#return ( 50*(math.exp(x)-math.exp(y))+math.cos(x*y)+math.sin(x*y) )*math.exp(t) #Solution 5 includ
return -( 50*(math.exp(x)-math.exp(y))+math.cos(x*y)+math.sin(x*y) )*math.exp(-t)
def InitialCond(x,y):
#These are the initial condition on the magnetic field
#r must be a 2 dimensional array
#this function is vector valued
#It must be divergence free
#return math.exp(y),math.exp(x) #Solution #1 does not include J cross B term
#return 2*y,3*x #Solution #1 does not include J cross B term but is linear
#return math.exp(y),0 #Solution #3 it includes J cross B term
#return math.exp(y),math.exp(x) #Solution#4 includes J cross B term
#return 20*math.exp(y)+x,20*math.exp(x)-y#Solution 5 Includes J cross B term
# Bx = 50*math.exp(y)+x*math.sin(x*y)-x*math.cos(x*y)
# By = 50*math.exp(x)-y*math.sin(x*y)+y*math.cos(x*y)
# return Bx,By #Solution 6 includes JxB
Bx = 50*math.exp(y)+x*math.sin(x*y)-x*math.cos(x*y)
By = 50*math.exp(x)-y*math.sin(x*y)+y*math.cos(x*y)
return Bx,By #Solution 6 includes JxB
def ExactB(x,y,t):
#This is the exact Magnetic field
#Must be divergence free
#return math.exp(t+y),math.exp(t+x) #Solution #1 does not include J cross B term
#return 2*y,3*x #Solution #2 does not include J cross B term but is linear
#return math.exp(y+t),0 #Solution #3 it includes J cross B term
#return math.exp(t+y),math.exp(t+x) #Solution#4 includes J cross B term
#return 20*math.exp(y+t)+x*math.exp(t),20*math.exp(x+t)-y*math.exp(t)#Solution 5 Includes J cross B term
Bx = ( 50*math.exp(y)+x*math.sin(x*y)-x*math.cos(x*y) )*math.exp(-t)
By = ( 50*math.exp(x)-y*math.sin(x*y)+y*math.cos(x*y) )*math.exp(-t)
return Bx,By#Solution 6 includes JxB
def ExactE(x,y,t):
#This is the exact Electric field
#return math.exp(t+x)-math.exp(t+y) #Solution #1 does not include J cross B term
#return 1 #Solution #2 does not include J cross B term but is linear
#return -math.exp(y+t) #Solution#3 includes J cross B term
#return math.exp(t+x)-math.exp(t+y)+math.exp(t) #Solution 4 Includes J cross B term
#return 20*math.exp(t+x)-20*math.exp(t+y)-x*y*math.exp(t)#Solution 5 Includes J cross B term
# return ( 50*( math.exp(x)-math.exp(y) )+math.cos(x*y)+math.sin(x*y) )*math.exp(t) #Solution 6 includes JxB
return -( 50*(math.exp(x)-math.exp(y))+math.cos(x*y)+math.sin(x*y) )*math.exp(-t)
def J(x,y):
#return 0,0 #for solutions that do not inclide J x B
#return math.exp(y),0 #Solution#3 includes J cross B term
#return 2*math.exp(-x),math.exp(-y) #Solution#4 includes J cross B term
#return 0,0
#return -(x*y)/(40*math.exp(x)-2*y),(x*y)/(40*math.exp(y)+2*x) #solution 5 includes JxB
# Jx = ( (x**2+y**2+1)*(math.sin(x*y)+math.cos(x*y)) )/( 2*(50*math.exp(x)-y*math.sin(x*y)+y*math.cos(x*y)) )
# Jy = -( (x**2+y**2+1)*(math.sin(x*y)+math.cos(x*y)) )/( 2*(50*math.exp(y)+x*math.sin(x*y)-x*math.cos(x*y)) )
# return Jx,Jy #Solution 6 includes JxB
Jx = ( (x**2+y**2-1)*(math.sin(x*y)+math.cos(x*y))-100*math.exp(x)+100*math.exp(y) )/( 2*(50*math.exp(x)-y*math.sin(x*y)+y*math.cos(x*y)) )
Jy = -( (x**2+y**2-1)*(math.sin(x*y)+math.cos(x*y))-100*math.exp(x)+100*math.exp(y) )/( 2*(50*math.exp(y)+x*math.sin(x*y)-x*math.cos(x*y)) )
return Jx,Jy #Solution 6 includes JxB
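# Verification sketch (assumption: sympy is installed). It checks symbolically
# that the manufactured "solution 6" pair above satisfies the induction law
# B_t = -curl E, with the 2D curl of the scalar E taken as (dE/dy, -dE/dx).
def _check_solution6():
    import sympy as sp
    x, y, t = sp.symbols('x y t')
    E = -(50*(sp.exp(x)-sp.exp(y))+sp.cos(x*y)+sp.sin(x*y))*sp.exp(-t)
    Bx = (50*sp.exp(y)+x*sp.sin(x*y)-x*sp.cos(x*y))*sp.exp(-t)
    By = (50*sp.exp(x)-y*sp.sin(x*y)+y*sp.cos(x*y))*sp.exp(-t)
    assert sp.simplify(sp.diff(Bx,t)+sp.diff(E,y)) == 0
    assert sp.simplify(sp.diff(By,t)-sp.diff(E,x)) == 0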
def Poly1(x,y):
return 1,0
def Poly2(x,y):
return 0,1
def Poly3(x,y):
return x,0
def Poly4(x,y):
return y,0
def Poly5(x,y):
return 0,x
def Poly6(x,y):
return 0,y
def Poly(x,y):
return x,y
#MeshRetrievalFunctions
def GetMesh(file):
#This function will, provided a text file in the format of meshes in Mathematica,
#return the coordinates of the nodes, the Edge Nodes and the Nodes of each element
UnprocessedMesh=open(file).read()
FirstCut=UnprocessedMesh.split('}}')
Nodes=FirstCut[0]+'}'
EdgeNodes,Elements,trash=FirstCut[1].split(']}')
for rep in (('{','['),('}',']'),('*^','*10**'),('Line[',''),('Polygon[',''),(']]',']')):
Nodes=Nodes.replace(rep[0],rep[1])
EdgeNodes=EdgeNodes.replace(rep[0],rep[1]) #Replace the characters in the first position of the
Elements=Elements.replace(rep[0],rep[1]) #parenthesis with the second character
Nodes=Nodes+']'
EdgeNodes = EdgeNodes+']'
Elements = Elements+']' #add the last ]
Nodes = eval(Nodes)
EdgeNodes = eval(EdgeNodes)# turn string to list
Elements = eval(Elements)
EdgeNodes = [(np.array(y)-1).tolist() for y in EdgeNodes]
Elements = [(np.array(y)-1).tolist() for y in Elements]
#EdgeNodes=np.array(EdgeNodes)-1
#Elements=np.array(Elements)-1 #subtract one from each position in the array(lists in mathematica begin with 1)
return Nodes,EdgeNodes,Elements
#def EdgesElement(EdgeNodes,Elements):
#This function will return the Edges of an element provided a list of the Nodes of the edges
#and the nodes of the elements
# NumberElements=len(Elements)
# NumberEdges=len(EdgeNodes)
# ElementEdges=[0]*NumberElements
# for i in range(NumberElements): #loop over elements
# NumberNodesEdges=len(Elements[i]) #keep in mind there are the same number of Edges as there a
#of nodes
# ElementEdge=[0]*NumberNodesEdges
# Element=[0]*(NumberNodesEdges+1)
# Element[0:NumberNodesEdges]=Elements[i] #pick a particular element
# Element[NumberNodesEdges]=Element[0] # add the first vertex as the last vertex (so that edges become every pair)
# for j in range(NumberNodesEdges): #run over every pair of vertices(edges)
# Edge=[Element[j],Element[j+1]]
# for ell in range(NumberEdges): #run over all edges
# if Edge==EdgeNodes[ell] or [Edge[1],Edge[0]]==EdgeNodes[ell]: #if an the edge agrees, in any direction,
# ElementEdge[j]=ell #with one in the list of edges then we have
#
# ElementEdges[i]=ElementEdge
# return ElementEdges #identified the edge
def EdgesElement(EdgeNodes,Elements):
#This function will return the Edges of an element provided a list of the Nodes of the edges
#and the nodes of the elements
NumberElements = len(Elements)
NumberEdges = len(EdgeNodes)
ElementEdges = [0]*NumberElements
for i in range(NumberElements): #loop over elements
NumberNodesEdges = len(Elements[i]) #keep in mind there are the same number of edges as there are nodes
ElementEdge = [0]*NumberNodesEdges
Element = [0]*(NumberNodesEdges+1)
Element[0:NumberNodesEdges] = Elements[i] #pick a particular element
Element[NumberNodesEdges] = Element[0] # add the first vertex as the last vertex (so that edges become every pair)
for j in range(NumberNodesEdges): #run over every pair of vertices(edges)
Edge = [Element[j],Element[j+1]]
if Edge in EdgeNodes:
ElementEdge[j] = EdgeNodes.index(Edge)
else:
Edge = [Element[j+1],Element[j]]
ElementEdge[j] = EdgeNodes.index(Edge)
ElementEdges[i]=ElementEdge
return ElementEdges
def Boundary(Nodes):
#Given an array of Nodes this routine gives the nodes that lie on the boundary
NumberNodes=len(Nodes)
BoundaryNodes=[-1]*NumberNodes
NumberBoundaryNodes=0
for i in range(NumberNodes):
Node=Nodes[i]
if abs(Node[0]-1)<10**-10 or abs(Node[0]+1)<10**-10 or abs(Node[1]-1)<10**-10 or abs(Node[1]+1)<10**-10:
BoundaryNodes[NumberBoundaryNodes]=i
NumberBoundaryNodes=NumberBoundaryNodes+1
return BoundaryNodes[0:NumberBoundaryNodes]
def Mesh(file):
#Provided a file with a mesh in the language of mathematica this routine will return
#four lists, Nodes, EdgeNodes,ElementEdges,BoundaryNodes
Nodes,EdgeNodes,Elements=GetMesh(file)
ElementEdges=EdgesElement(EdgeNodes,Elements)
BoundaryNodes=Boundary(Nodes)
return Nodes,EdgeNodes,ElementEdges,BoundaryNodes
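# Usage sketch (assumption: "mesh.txt" is a mesh exported from Mathematica in
# the format GetMesh expects; note that Boundary() assumes the domain [-1,1]^2).
def _example_mesh():
    Nodes, EdgeNodes, ElementEdges, BoundaryNodes = Mesh("mesh.txt")
    print(len(Nodes), "nodes,", len(EdgeNodes), "edges,",
          len(ElementEdges), "elements,", len(BoundaryNodes), "boundary nodes")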
def FindVertecesEdges(Nodes,EdgeNodes):
#This function, given a set of Edges, will return an array
#the ith element of this array is the set of all edges that
#have the ith vertex as an endpoint
NumberNodes=len(Nodes)
VertecesEdges=[[]]*NumberNodes
i=0
for Edge in EdgeNodes:
v1=Edge[0]
v2=Edge[1]
VertecesEdges[v1]=list(set().union(VertecesEdges[v1],[i]))
VertecesEdges[v2]=list(set().union(VertecesEdges[v2],[i]))
i=i+1
return VertecesEdges
def ProcessedMesh(Pfile):
with open(Pfile, "rb") as fp: # Unpickling
N,E,EE,B,O = pickle.load(fp)
return N,E,EE,B,O
#AuxiliaryFunctions
def ElementCoordinates(Element,EdgeNodes,Nodes):
#provided an element of a mesh this function returns its vertices
#as an array of the dimension of the number of edges on the element
N=len(Element)
Vertices=[0]*N #The number of vertices agrees with the number of edges
Edge=Element[0]
Vertices[0]=Nodes[EdgeNodes[Edge][0]]
Vertices[1]=Nodes[EdgeNodes[Edge][1]] #The first two vertices are those in the first edge
for i in range(1,N-1):
Edge=EdgeNodes[Element[i]]
v1=Nodes[Edge[0]]
v2=Nodes[Edge[1]] #new vertex added is the one on the i+1 edge that is not in the ith edge
if Vertices[i-1]==v1:
Vertices[i+1]=v2
elif Vertices[i-1]==v2:
Vertices[i+1]=v1
elif Vertices[i]==v1:
Vertices[i+1]=v2
else:
Vertices[i+1]=v1
return Vertices
def Orientation(Element,EdgeNodes,Nodes):
#Provided an element of a mesh this function returns a vector of dimension
#as large as the number of edges with the orientation of the normal vector
#1 if the orientation is outward and -1 if it is inward.
#This algorithm assumes that the element is convex
N=len(Element)
#first we need to find a point inside the element to give indication of the orientation
Vertices=ElementCoordinates(Element,EdgeNodes,Nodes)
xinside=0
yinside=0
for i in range(N):
xinside=xinside+Vertices[i][0]
yinside=yinside+Vertices[i][1]
xinside=xinside/N
yinside=yinside/N #since the element is convex any convex combination of the vertices lies inside
#Now we move on to finding the orientation of the edges
Ori=[0]*(N+1)
for i in range(N):
Edge=EdgeNodes[Element[i]]
[x1,y1]=Nodes[Edge[0]]
[x2,y2]=Nodes[Edge[1]]
#This is the inner product between n, the tangent t=(x2,y2)-(x1,y1) rotated by pi/2 clockwise,
# and the vector u=(xinside,yinside)-(x1,y1) which should point towards the interior of the element
#if the result is negative it means that the angle between t and u is larger than 90 which implies that
#n points outside of the element
sign=(y2-y1)*(xinside-x1)+(x1-x2)*(yinside-y1)
if sign<0:
Ori[i]=1
else:
Ori[i]=-1
Ori[N]=Ori[0]
return Ori
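# Worked example: a single unit square with its edges listed counterclockwise.
# With this ordering every rotated tangent points outward, so Orientation
# returns all +1 (the last entry repeats the first to close the loop).
def _example_orientation():
    Nodes = [[0,0],[1,0],[1,1],[0,1]]
    EdgeNodes = [[0,1],[1,2],[2,3],[3,0]]
    Element = [0,1,2,3]
    print(Orientation(Element,EdgeNodes,Nodes)) # -> [1, 1, 1, 1, 1]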
def StandardElement(Element,EdgeNodes,Nodes,Ori):
#This routine will reorient, if necessary, the edges of the element to agree with Stokes' theorem.
#This is to say that the edges will be reoriented in such a way that the element is traversed in the
#counterclockwise direction, and rotating the tangential vector by pi/2 in the clockwise direction
#yields an outward normal vector.
#The last vertex,edge will be the first. This is in order to complete the loop.
N = len(Element)
OrientedEdges = [0]*(N+1)
OrientedVertices = [0]*(N+1)
for i in range(N):
if Ori[i]==1:
OrientedEdges[i] = EdgeNodes[Element[i]] #If they are "well-oriented" then do not alter them
else:
[v1,v2] = EdgeNodes[Element[i]] #Otherwise reverse the order of their vertices
OrientedEdges[i] = [v2,v1]
OrientedVertices[i] = Nodes[OrientedEdges[i][0]]
OrientedEdges[N] = OrientedEdges[0]
OrientedVertices[N] = OrientedVertices[0]
return OrientedVertices,OrientedEdges
def Centroid(Element,EdgeNodes,Nodes,Ori):
#This function, when provided with an element, will return its centroid or barycenter.
N = len(Element)
Cx = 0
Cy = 0
A = 0
Vertices,Edges = StandardElement(Element,EdgeNodes,Nodes,Ori)
for i in range(N):
xi = Vertices[i][0]
yi = Vertices[i][1]
xiplusone = Vertices[i+1][0]
yiplusone = Vertices[i+1][1]
Cx = Cx+(xi+xiplusone)*(xi*yiplusone-xiplusone*yi) #This formula is in Wikipedia
Cy = Cy+(yi+yiplusone)*(xi*yiplusone-xiplusone*yi)
A = A+xi*yiplusone-xiplusone*yi
A = 0.5*A
Cx = Cx/(6*A)
Cy = Cy/(6*A)
return Cx,Cy,A,Vertices,Edges
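# Worked example: for the counterclockwise unit square used above the centroid
# is (0.5, 0.5) and the signed area is 1.
def _example_centroid():
    Nodes = [[0,0],[1,0],[1,1],[0,1]]
    EdgeNodes = [[0,1],[1,2],[2,3],[3,0]]
    Element = [0,1,2,3]
    Ori = Orientation(Element,EdgeNodes,Nodes)
    Cx,Cy,A,V,E = Centroid(Element,EdgeNodes,Nodes,Ori)
    print(Cx,Cy,A) # -> 0.5 0.5 1.0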
def InternalObjects(Boundary,Objects):
#provided a set of geometrical objects, say vertices or edges, this routine returns those that
#are internal.
N=len(Objects)
Internal=np.sort(np.array(list(set(np.arange(N))-set(Boundary))))
NumberInternal=len(Internal)
return Internal,NumberInternal
#Assembly
def LocprojE(Func,Element,EdgeNodes,Nodes):
#This function will, provided a function, a set of nodes and edges, compute the
#projection onto the space of edge-based functions. The direction of the unit normal
#will be assumed to be the clockwise rotation of the tangential vector.
N=len(Element)
proj=np.zeros((N,1))
j = 0
for i in Element:
x1=Nodes[EdgeNodes[i][0]][0]
y1=Nodes[EdgeNodes[i][0]][1]
x2=Nodes[EdgeNodes[i][1]][0]
y2=Nodes[EdgeNodes[i][1]][1]
lengthe=math.sqrt((x2-x1)**2+(y2-y1)**2)
xmid=0.5*(x1+x2)
ymid=0.5*(y1+y2)
etimesnormal=[y2-y1,x1-x2]
Fx,Fy=Func(xmid,ymid)
proj[j]=(etimesnormal[0]*Fx+etimesnormal[1]*Fy)*lengthe**-1 #midpoint rule
j = j+1
return proj
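# Worked example: projecting the constant field (1,0) (Poly1 above) onto the
# unit-square element. Each degree of freedom is the normal component of the
# field at the edge midpoint, so the vertical edges give +1/-1 and the
# horizontal edges give 0.
def _example_locprojE():
    Nodes = [[0,0],[1,0],[1,1],[0,1]]
    EdgeNodes = [[0,1],[1,2],[2,3],[3,0]]
    Element = [0,1,2,3]
    print(LocprojE(Poly1,Element,EdgeNodes,Nodes).flatten()) # -> [ 0.  1.  0. -1.]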
def LocalMassMatrix(N,R,n,A,nu):
#Given the matrices N,R as defined in Ch.4 of MFD book and the dimension
#of the reconstruction space this function assembles the local mass matrix
#The formula is M=M0+M1 where M0=R(N^T R)^-1R^T and M1=lamb*DD^T where the
#columns of D span the null-space of N^T and lamb=2*trace(M0)/n
#n is the dimension of the reconstruction space
#nu is the average, over the element, of the diffusion coefficient
#A is the area of the element
#These commands compute M0
M0=np.matmul(np.transpose(N),R)
M0=np.linalg.inv(M0)
M0=np.matmul(R,M0)
M0=np.matmul(M0,np.transpose(R))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 11:08:09 2020
@author: alvarezguido
GITHUB: https://github.com/alvarezguido
"""
"""
SYNOPSIS
----
----
-----
"""
import simpy
import random
import numpy as np
import math
#import sys
#import re
import matplotlib.pyplot as plt
#import os
#import operator
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
#import PIL
import random
import re
import os
import datetime
import sys
name = "LT"
mode_debbug = 0
if not mode_debbug:
null = open(os.devnull, 'w')
old_stdout = sys.stdout
sys.stdout = null
####WE START BY USING SF=12 AND BW=125 AND CR=1, FOR ALL NODES AND ALL TRANSMISSIONS######
if mode_debbug:
RANDOM_SEED = 5
chan = 1
packetlen = 20
total_data = 60
beacon_time = 120
maxBSReceives = 16
multi_nodes = [10]
else:
RANDOM_SEED = int(sys.argv[1])
chan = int(sys.argv[2])
packetlen = int(sys.argv[3]) ##NODES SEND PACKETS OF JUST 20 Bytes
total_data = int(sys.argv[4]) ##TOTAL DATA ON BUFFER, FOR EACH NODE (IT'S THE BUFFER OF DATA BEFORE IT STARTS SENDING)
beacon_time = int(sys.argv[5]) ###SAT SENDS BEACON EVERY CERTAIN TIME
maxBSReceives = int(sys.argv[6]) ##MAX NUMBER OF PACKETS THAT BS (ie SATELLITE) CAN RECEIVE AT SAME TIME
multi_nodes = [int(sys.argv[7]), int(sys.argv[8]) ,int(sys.argv[9]), int(sys.argv[10]),int(sys.argv[11]),int(sys.argv[12]),int(sys.argv[13]),int(sys.argv[14]),int(sys.argv[15]),int(sys.argv[16]),int(sys.argv[17]),int(sys.argv[18]),int(sys.argv[19]),int(sys.argv[20])]
random.seed(RANDOM_SEED) #RANDOM SEED IS FOR GENERATE ALWAYS THE SAME RANDOM NUMBERS (ie SAME RESULTS OF SIMULATION)
nodesToSend = []
packetsToSend = math.ceil(total_data/packetlen)
###GLOBAL PARAMS ####
bsId = 1 ##ID OF BASE STATION (NOT USED)
channel = [0,1,2] ##NOT USED BY NOW
avgSendTime = 3 ## NOT USED! --> A NODE SENDS A PACKET EVERY X SECS
back_off = beacon_time * 0.95 ###BACK OFF TIME FOR SEND A PACKET
packetsAtBS = [] ##USED FOR CHEK IF THERE ARE ALREADY PACKETS ON THE SATELLITE
c = 299792.458 ###SPEED LIGHT [km/s]
Ptx = 14
G_device = 0; ##ANTENNA GAIN FOR AN END-DEVICE
G_sat = 12; ##ANTENNA GAIN FOR SATELLITE
nodes = [] ###EACH NODE WILL BE APPENDED TO THIS VARIABLE
freq =868e6 ##USED FOR PATH LOSS CALCULATION
frequency = [868100000, 868300000, 868500000] ##FROM LORAWAN REGIONAL PARAMETERS EU863-870 / EU868
nrLost = 0 ### TOTAL OF LOST PACKETS DUE Lpl
nrCollisions = 0 ##TOTAL OF COLLIDED PACKETS
nrProcessed = 0 ##TOTAL OF PROCESSED PACKETS
nrReceived = 0 ###TOTAL OF RECEIVED PACKETS
##ARRAY WITH MEASURED VALUES FOR SENSITIVITY, NEW VALUES
##THE FOLLOWING VALUES CORRESPOND TO:
# - FIRST ELEMENT: IT'S THE SF (NOT USABLE)
# - SECOND ELEMENT: SENSITIVITY FOR 125KHZ BW
# - THIRD ELEMENT: SENSITIVITY FOR 250KHZ BW
# - FOURTH ELEMENT: SENSITIVITY FOR 500KHZ BW
# NOTICE THAT SENSITIVITY DEGRADES AS BW INCREASES, AND ALSO WITH LOWER SF
# THESE VALUES FOLLOW:
# wf = -174 + 10 log(BW) +NF +SNRf
sf7 = np.array([7,-123,-120,-117.0])
sf8 = np.array([8,-126,-123,-120.0])
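# Sketch of the sensitivity formula quoted above, wf = -174 + 10 log10(BW) + NF + SNRf.
# Assumptions: NF = 6 dB and typical LoRa demodulator SNR limits per SF; the
# measured arrays above need not match these textbook values exactly.
def _example_sensitivity(sf, bw_hz, nf_db=6):
    snr_limit = {7:-7.5, 8:-10, 9:-12.5, 10:-15, 11:-17.5, 12:-20}[sf]
    return -174 + 10*math.log10(bw_hz) + nf_db + snr_limit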
# Copyright (c) 2019 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
import numpy
from cura.Arranging.Arrange import Arrange
from cura.Arranging.ShapeArray import ShapeArray
## Triangle of area 12
def gimmeTriangle():
return numpy.array([[-3, 1], [3, 1], [0, -3]], dtype=numpy.int32)
## Boring square
def gimmeSquare():
return numpy.array([[-2, -2], [2, -2], [2, 2], [-2, 2]], dtype=numpy.int32)
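## Sanity check (sketch): the shoelace formula confirms the advertised areas
## of the helper polygons above (triangle: 12, square: 16).
def test_polygon_areas():
    def shoelace(v):
        x, y = v[:, 0], v[:, 1]
        return 0.5 * abs(numpy.dot(x, numpy.roll(y, 1)) - numpy.dot(y, numpy.roll(x, 1)))
    assert shoelace(gimmeTriangle()) == 12
    assert shoelace(gimmeSquare()) == 16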
## Triangle of area 12
def gimmeShapeArray(scale = 1.0):
vertices = gimmeTriangle()
shape_arr = ShapeArray.fromPolygon(vertices, scale = scale)
return shape_arr
## Boring square
def gimmeShapeArraySquare(scale = 1.0):
vertices = gimmeSquare()
shape_arr = ShapeArray.fromPolygon(vertices, scale = scale)
return shape_arr
## Smoke test for Arrange
def test_smoke_arrange():
Arrange.create(fixed_nodes = [])
## Smoke test for ShapeArray
def test_smoke_ShapeArray():
gimmeShapeArray()
## Test ShapeArray
def test_ShapeArray():
scale = 1
ar = Arrange(16, 16, 8, 8, scale = scale)
ar.centerFirst()
shape_arr = gimmeShapeArray(scale)
count = len(numpy.where(shape_arr.arr == 1)[0])
assert count >= 10 # should approach 12
## Test ShapeArray with scaling
def test_ShapeArray_scaling():
scale = 2
ar = Arrange(16, 16, 8, 8, scale = scale)
ar.centerFirst()
shape_arr = gimmeShapeArray(scale)
count = len(numpy.where(shape_arr.arr == 1)[0])
assert count >= 40 # should approach 2*2*12 = 48
## Test ShapeArray with scaling
def test_ShapeArray_scaling2():
scale = 0.5
ar = Arrange(16, 16, 8, 8, scale = scale)
ar.centerFirst()
shape_arr = gimmeShapeArray(scale)
count = len(numpy.where(shape_arr.arr == 1)[0])
assert count >= 1 # should approach 3, but it can be inaccurate due to pixel rounding
## Test centerFirst
def test_centerFirst():
ar = Arrange(300, 300, 150, 150, scale = 1)
ar.centerFirst()
assert ar._priority[150][150] < ar._priority[170][150]
assert ar._priority[150][150] < ar._priority[150][170]
assert ar._priority[150][150] < ar._priority[170][170]
assert ar._priority[150][150] < ar._priority[130][150]
assert ar._priority[150][150] < ar._priority[150][130]
assert ar._priority[150][150] < ar._priority[130][130]
## Test centerFirst
def test_centerFirst_rectangular():
ar = Arrange(400, 300, 200, 150, scale = 1)
ar.centerFirst()
assert ar._priority[150][200] < ar._priority[150][220]
assert ar._priority[150][200] < ar._priority[170][200]
assert ar._priority[150][200] < ar._priority[170][220]
assert ar._priority[150][200] < ar._priority[180][150]
assert ar._priority[150][200] < ar._priority[130][200]
assert ar._priority[150][200] < ar._priority[130][180]
## Test centerFirst
def test_centerFirst_rectangular2():
ar = Arrange(10, 20, 5, 10, scale = 1)
ar.centerFirst()
assert ar._priority[10][5] < ar._priority[10][7]
## Test backFirst
def test_backFirst():
ar = Arrange(300, 300, 150, 150, scale = 1)
ar.backFirst()
assert ar._priority[150][150] < ar._priority[170][150]
assert ar._priority[150][150] < ar._priority[170][170]
assert ar._priority[150][150] > ar._priority[130][150]
assert ar._priority[150][150] > ar._priority[130][130]
## See if the result of bestSpot has the correct form
def test_smoke_bestSpot():
ar = Arrange(30, 30, 15, 15, scale = 1)
ar.centerFirst()
shape_arr = gimmeShapeArray()
best_spot = ar.bestSpot(shape_arr)
assert hasattr(best_spot, "x")
assert hasattr(best_spot, "y")
assert hasattr(best_spot, "penalty_points")
assert hasattr(best_spot, "priority")
## Real life test
def test_bestSpot():
ar = Arrange(16, 16, 8, 8, scale = 1)
ar.centerFirst()
shape_arr = gimmeShapeArray()
best_spot = ar.bestSpot(shape_arr)
assert best_spot.x == 0
assert best_spot.y == 0
ar.place(best_spot.x, best_spot.y, shape_arr)
# Place object a second time
best_spot = ar.bestSpot(shape_arr)
assert best_spot.x is not None # we found a location
assert best_spot.x != 0 or best_spot.y != 0 # it can't be on the same location
ar.place(best_spot.x, best_spot.y, shape_arr)
## Real life test rectangular build plate
def test_bestSpot_rectangular_build_plate():
ar = Arrange(16, 40, 8, 20, scale = 1)
ar.centerFirst()
shape_arr = gimmeShapeArray()
best_spot = ar.bestSpot(shape_arr)
ar.place(best_spot.x, best_spot.y, shape_arr)
assert best_spot.x == 0
assert best_spot.y == 0
# Place object a second time
best_spot2 = ar.bestSpot(shape_arr)
assert best_spot2.x is not None # we found a location
assert best_spot2.x != 0 or best_spot2.y != 0 # it can't be on the same location
ar.place(best_spot2.x, best_spot2.y, shape_arr)
# Place object a 3rd time
best_spot3 = ar.bestSpot(shape_arr)
assert best_spot3.x is not None # we found a location
assert best_spot3.x != best_spot.x or best_spot3.y != best_spot.y # it can't be on the same location
assert best_spot3.x != best_spot2.x or best_spot3.y != best_spot2.y # it can't be on the same location
ar.place(best_spot3.x, best_spot3.y, shape_arr)
best_spot_x = ar.bestSpot(shape_arr)
ar.place(best_spot_x.x, best_spot_x.y, shape_arr)
best_spot_x = ar.bestSpot(shape_arr)
ar.place(best_spot_x.x, best_spot_x.y, shape_arr)
best_spot_x = ar.bestSpot(shape_arr)
ar.place(best_spot_x.x, best_spot_x.y, shape_arr)
## Real life test
def test_bestSpot_scale():
scale = 0.5
ar = Arrange(16, 16, 8, 8, scale = scale)
ar.centerFirst()
shape_arr = gimmeShapeArray(scale)
best_spot = ar.bestSpot(shape_arr)
assert best_spot.x == 0
assert best_spot.y == 0
ar.place(best_spot.x, best_spot.y, shape_arr)
# Place object a second time
best_spot = ar.bestSpot(shape_arr)
assert best_spot.x is not None # we found a location
assert best_spot.x != 0 or best_spot.y != 0 # it can't be on the same location
ar.place(best_spot.x, best_spot.y, shape_arr)
## Real life test
def test_bestSpot_scale_rectangular():
scale = 0.5
ar = Arrange(16, 40, 8, 20, scale = scale)
ar.centerFirst()
shape_arr = gimmeShapeArray(scale)
shape_arr_square = gimmeShapeArraySquare(scale)
best_spot = ar.bestSpot(shape_arr_square)
assert best_spot.x == 0
assert best_spot.y == 0
ar.place(best_spot.x, best_spot.y, shape_arr_square)
# Place object a second time
best_spot = ar.bestSpot(shape_arr)
assert best_spot.x is not None # we found a location
assert best_spot.x != 0 or best_spot.y != 0 # it can't be on the same location
ar.place(best_spot.x, best_spot.y, shape_arr)
best_spot = ar.bestSpot(shape_arr_square)
ar.place(best_spot.x, best_spot.y, shape_arr_square)
## Try to place an object and see if something explodes
def test_smoke_place():
ar = Arrange(30, 30, 15, 15)
ar.centerFirst()
shape_arr = gimmeShapeArray()
assert not numpy.any(ar._occupied)
ar.place(0, 0, shape_arr)
assert numpy.any(ar._occupied)
## See of our center has less penalty points than out of the center
def test_checkShape():
ar = Arrange(30, 30, 15, 15)
ar.centerFirst()
shape_arr = gimmeShapeArray()
points = ar.checkShape(0, 0, shape_arr)
points2 = ar.checkShape(5, 0, shape_arr)
points3 = ar.checkShape(0, 5, shape_arr)
assert points2 > points
assert points3 > points
## See of our center has less penalty points than out of the center
def test_checkShape_rectangular():
ar = Arrange(20, 30, 10, 15)
ar.centerFirst()
shape_arr = gimmeShapeArray()
points = ar.checkShape(0, 0, shape_arr)
points2 = ar.checkShape(5, 0, shape_arr)
points3 = ar.checkShape(0, 5, shape_arr)
assert points2 > points
assert points3 > points
## Check that placing an object on occupied place returns None.
def test_checkShape_place():
ar = Arrange(30, 30, 15, 15)
ar.centerFirst()
shape_arr = gimmeShapeArray()
ar.checkShape(3, 6, shape_arr)
ar.place(3, 6, shape_arr)
points2 = ar.checkShape(3, 6, shape_arr)
assert points2 is None
## Test the whole sequence
def test_smoke_place_objects():
ar = Arrange(20, 20, 10, 10, scale = 1)
ar.centerFirst()
shape_arr = gimmeShapeArray()
for i in range(5):
best_spot_x, best_spot_y, score, prio = ar.bestSpot(shape_arr)
ar.place(best_spot_x, best_spot_y, shape_arr)
# Test some internals
def test_compare_occupied_and_priority_tables():
ar = Arrange(10, 15, 5, 7)
ar.centerFirst()
assert ar._priority.shape == ar._occupied.shape
## Polygon -> array
def test_arrayFromPolygon():
vertices = numpy.array([[-3, 1], [3, 1], [0, -3]])
array = ShapeArray.arrayFromPolygon([5, 5], vertices)
assert numpy.any(array)
## Polygon -> array
def test_arrayFromPolygon2():
vertices = numpy.array([[-3, 1], [3, 1], [2, -3]])
array = ShapeArray.arrayFromPolygon([5, 5], vertices)
assert numpy.any(array)
## Polygon -> array
def test_fromPolygon():
vertices = numpy.array([[0, 0.5], [0, 0], [0.5, 0]])
array = ShapeArray.fromPolygon(vertices, scale=0.5)
assert numpy.any(array.arr)
## Line definition -> array with true/false
def test_check():
base_array = numpy.zeros([5, 5], dtype=float)
from __future__ import division
from builtins import range
from sciunit import Score
from sciunit.errors import InvalidScoreError
import numpy
from sciunit.utils import assert_dimensionless
class ZScore_somaticSpiking(Score):
"""
Mean of Z scores. A float indicating the average standardized difference
from reference means for somatic spiking features.
"""
def __init__(self, score, related_data={}):
if not isinstance(score, Exception) and not isinstance(score, float):
raise InvalidScoreError("Score must be a float.")
else:
super(ZScore_somaticSpiking,self).__init__(score, related_data=related_data)
@classmethod
def compute(cls, observation, prediction):
"""Computes average of z-scores from observation and prediction for somatic spiking features"""
feature_errors=numpy.array([])
features_names=(list(observation.keys()))
feature_results_dict={}
bad_features = []
for i in range(len(features_names)):
p_value = prediction[features_names[i]]['feature mean']
o_mean = float(observation[features_names[i]]['Mean'])
o_std = float(observation[features_names[i]]['Std'])
p_std = prediction[features_names[i]]['feature sd']
try:
feature_error = abs(p_value - o_mean)/o_std
feature_error = assert_dimensionless(feature_error)
except ZeroDivisionError:
feature_error = float("inf")
except (TypeError,AssertionError) as e:
feature_error = e
#feature_errors=numpy.append(feature_errors,feature_error)
feature_result={features_names[i]: feature_error}
feature_results_dict.update(feature_result)
if numpy.isnan(feature_error) or numpy.isinf(feature_error):
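bad_features.append(features_names[i]) # (added) plausible continuation; the original file is truncated here
# --- illustrative usage (added; not part of the original file) ---
# A minimal, hypothetical observation/prediction pair: compute() yields
# |75 - 80| / 5 = 1.0 for this single feature before the per-feature
# errors are averaged into the final score:
#   obs = {'AP_height': {'Mean': 80.0, 'Std': 5.0}}
#   pred = {'AP_height': {'feature mean': 75.0, 'feature sd': 4.0}}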
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('/home/groups/ZuckermanLab/copperma/cell/celltraj')
import celltraj
import h5py
import pickle
import os
import subprocess
import time
sys.path.append('/home/groups/ZuckermanLab/copperma/msmWE/BayesianBootstrap')
import bootstrap
import umap
import pyemma.coordinates as coor
import scipy
modelList=['PBS_17nov20','EGF_17nov20','HGF_17nov20','OSM_17nov20','BMP2_17nov20','IFNG_17nov20','TGFB_17nov20']
nmodels=len(modelList)
modelSet=[None]*nmodels
for i in range(nmodels):
modelName=modelList[i]
objFile=modelName+'_coords.obj'
objFileHandler=open(objFile,'rb')
modelSet[i]=pickle.load(objFileHandler)
objFileHandler.close()
wctm=celltraj.cellTraj()
fileSpecifier='/home/groups/ZuckermanLab/copperma/cell/live_cell/mcf10a/batch_17nov20/*_17nov20.h5'
print('initializing...')
wctm.initialize(fileSpecifier,modelName)
nfeat=modelSet[0].Xf.shape[1]
Xf=np.zeros((0,nfeat))
indtreatment=np.array([])
indcellSet=np.array([])
for i in range(nmodels):
Xf=np.append(Xf,modelSet[i].Xf,axis=0)
indtreatment=np.append(indtreatment,i*np.ones(modelSet[i].Xf.shape[0]))
indcellSet=np.append(indcellSet,modelSet[i].cells_indSet)
indtreatment=indtreatment.astype(int)
indcellSet=indcellSet.astype(int)
Xpca,pca=wctm.get_pca_fromdata(Xf,var_cutoff=.9)
wctm.Xpca=Xpca
wctm.pca=pca
for i in range(nmodels):
indsf=np.where(indtreatment==i)[0]
modelSet[i].Xpca=Xpca[indsf,:]
all_trajSet=[None]*nmodels
for i in range(nmodels):
modelSet[i].get_unique_trajectories()
all_trajSet[i]=modelSet[i].trajectories.copy()
self=wctm
for trajl in [8]: #,2,3,4,5,6,7,8,9,10,12,14,16,18,20,25,30,26]:
wctm.trajl=trajl
Xpcat=np.zeros((0,pca.ndim*trajl))
indtreatment_traj=np.array([])
indstack_traj=np.array([])
indframes_traj=np.array([])
cellinds0_traj=np.array([])
cellinds1_traj=np.array([])
for i in range(nmodels):
modelSet[i].trajectories=all_trajSet[i].copy()
modelSet[i].trajl=trajl
modelSet[i].traj=modelSet[i].get_traj_segments(trajl)
data=modelSet[i].Xpca[modelSet[i].traj,:]
data=data.reshape(modelSet[i].traj.shape[0],modelSet[i].Xpca.shape[1]*trajl)
Xpcat=np.append(Xpcat,data,axis=0)
indtreatment_traj=np.append(indtreatment_traj,i*np.ones(data.shape[0]))
indstacks=modelSet[i].cells_imgfileSet[modelSet[i].traj[:,0]]
indstack_traj=np.append(indstack_traj,indstacks)
indframes=modelSet[i].cells_frameSet[modelSet[i].traj[:,0]]
indframes_traj=np.append(indframes_traj,indframes)
cellinds0=modelSet[i].traj[:,0]
cellinds0_traj=np.append(cellinds0_traj,cellinds0)
cellinds1=modelSet[i].traj[:,-1]
cellinds1_traj=np.append(cellinds1_traj,cellinds1)
cellinds0_traj=cellinds0_traj.astype(int)
cellinds1_traj=cellinds1_traj.astype(int)
for neigen in [2]: #[1,2,3,4,5]:
reducer=umap.UMAP(n_neighbors=200,min_dist=0.1, n_components=neigen, metric='euclidean')
trans = reducer.fit(Xpcat)
x=trans.embedding_
indst=np.arange(x.shape[0]).astype(int)
wctm.Xtraj=x.copy()
wctm.indst=indst.copy()
indconds=np.array([[0,1],[2,3],[4,5]]).astype(int)
ncond=3
fl=12
fu=96
nbinstotal=15.*15.
indstw=np.where(np.logical_and(indframes_traj[indst]<fu,indframes_traj[indst]>fl))[0]
probSet=[None]*nmodels
avoverlapSet=np.zeros(ncond)
prob1,edges=np.histogramdd(x[indstw,:],bins=int(np.ceil(nbinstotal**(1./neigen))),density=True)
for icond in range(ncond):
imf1=indconds[icond,0]
imf2=indconds[icond,1]
inds_imf1=np.where(indstack_traj==imf1)[0]
inds_imf2=np.where(indstack_traj==imf2)[0]
inds_cond=np.append(inds_imf1,inds_imf2)
for imf in range(nmodels):
indstm=np.where(indtreatment_traj==imf)[0]
indstm_cond=np.intersect1d(indstm,inds_cond)
xt=x[indstm_cond,0:neigen]
indg=np.where(np.logical_not(np.logical_or(np.isnan(xt[:,0]),np.isinf(xt[:,0]))))[0]
xt=xt[indg]
prob,edges2=np.histogramdd(xt,bins=edges,density=True) #for d=1
prob=prob/np.sum(prob)
probSet[imf]=prob.copy()
poverlapMatrix=np.zeros((nmodels,nmodels))
for i in range(nmodels):
for j in range(nmodels):
probmin=np.minimum(probSet[i],probSet[j])
poverlapMatrix[i,j]=np.sum(probmin)
avoverlapSet[icond]=np.mean(poverlapMatrix[np.triu_indices(nmodels,1)])
sys.stdout.write('avoverlap: '+str(avoverlapSet[0])+' '+str(avoverlapSet[1])+' '+str(avoverlapSet[2])+'\n')
#np.savetxt('avoverlapSet_UMAP_trajl'+str(trajl)+'_ndim'+str(neigen)+'_19feb21.dat',avoverlapSet)
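# (added note) poverlapMatrix[i, j] above is the histogram intersection
# sum(min(p_i, p_j)): e.g. for p1 = [0.5, 0.5] and p2 = [0.9, 0.1] it gives
# min(.5, .9) + min(.5, .1) = 0.6, i.e. 60% shared probability mass.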
for i in range(nmodels):
modelSet[i].trajectories=all_trajSet[i].copy()
indconds=np.array([[0,1],[2,3],[4,5]]).astype(int)
ncond=3
probSet=[None]*nmodels
sigdxSet=np.zeros(ncond)
sigxSet=np.zeros(ncond)
dxSet=np.zeros(ncond)
x0=np.zeros((0,neigen))
x1=np.zeros((0,neigen))
for icond in range(ncond):
imf1=indconds[icond,0]
imf2=indconds[icond,1]
inds_imf1=np.where(indstack_traj==imf1)[0]
inds_imf2=np.where(indstack_traj==imf2)[0]
inds_cond=np.append(inds_imf1,inds_imf2)
for imf in range(nmodels):
indstm=np.where(indtreatment_traj==imf)[0]
indstm_cond=np.intersect1d(indstm,inds_cond)
modelSet[imf].Xtraj=x[indstm,0:neigen]
indstm_cond_model=indstm_cond-np.min(indstm) #index in model
modelSet[imf].get_trajectory_steps(inds=None,get_trajectories=False,traj=modelSet[imf].traj[indstm_cond_model,:],Xtraj=modelSet[imf].Xtraj[indstm_cond_model,:])
x0=np.append(x0,modelSet[imf].Xtraj0,axis=0)
x1=np.append(x1,modelSet[imf].Xtraj1,axis=0)
g0=np.logical_not(np.logical_or(np.isnan(x0[:,0]),np.isinf(x0[:,0])))
g1=np.logical_not(np.logical_or(np.isnan(x1[:,0]),np.isinf(x1[:,0])))
indg=np.where(np.logical_and(g0,g1))[0]
x0=x0[indg,:]
x1=x1[indg,:]
avdx=np.median(x1-x0,axis=0)
avdxsq=np.median(np.power(x1-x0,2),axis=0)
sigdx=np.sqrt(np.sum(avdxsq-np.power(avdx,2)))
avx=np.mean(x0,axis=0)
avxsq=np.mean(np.power(x0-avx,2),axis=0)
sigx=np.sqrt(np.sum(avxsq))
sys.stdout.write('sigx: '+str(sigx)+' sigdx: '+str(sigdx)+'\n')
dxSet[icond]=sigdx/sigx #np.sum(dxcorr[0:4]) #np.mean(dr[inds_xr])
sigxSet[icond]=sigx
sigdxSet[icond]=sigdx
sys.stdout.write('dx ratio: '+str(dxSet[0])+' '+str(dxSet[1])+' '+str(dxSet[2])+'\n')
#np.savetxt('dxratioSet_UMAP_trajl'+str(trajl)+'_ndim'+str(neigen)+'_19feb21.dat',dxSet)
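# (added note) dxSet is a robust, median-based ratio of the RMS single-step
# displacement to the overall spread of the embedding; smaller values mean
# cells move smoothly through the UMAP space rather than jumping between
# distant regions.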
fl=12
fu=96 #frames for time window
indstw=np.where(np.logical_and(indframes_traj<fu,indframes_traj>fl))[0]
inds_imft=np.array([])
for imf in range(5):
inds_imft=np.append(inds_imft,np.where(indstack_traj==imf)[0])
for i in range(nmodels):
modelSet[i].trajectories=all_trajSet[i].copy()
inds_imft=inds_imft.astype(int)
inds_imfv=np.where(indstack_traj==5)[0]
inds_test=np.intersect1d(indstw,inds_imft)
inds_val=np.intersect1d(indstw,inds_imfv)
for n_clusters in [10,50,100,200]:
wctm.cluster_trajectories(n_clusters,x=x)
entpSet=np.zeros(nmodels)
for i in range(nmodels):
indstm=np.where(indtreatment_traj==i)[0]
modelSet[i].Xtraj=x[indstm,0:neigen]
indstm_test=np.intersect1d(indstm,inds_test)
indstm_test_model=indstm_test-np.min(indstm) #index in model
modelSet[i].get_trajectory_steps(inds=None,get_trajectories=False,traj=modelSet[i].traj[indstm_test_model,:],Xtraj=modelSet[i].Xtraj[indstm_test_model,:])
x0=modelSet[i].Xtraj0
x1=modelSet[i].Xtraj1
wctm.get_transition_matrix(x0,x1)
indstm_val=np.intersect1d(indstm,inds_val)
indstm_val_model=indstm_val-np.min(indstm) #index in model
modelSet[i].get_trajectory_steps(inds=None,get_trajectories=False,traj=modelSet[i].traj[indstm_val_model,:],Xtraj=modelSet[i].Xtraj[indstm_val_model,:])
x0v=modelSet[i].Xtraj0
x1v=modelSet[i].Xtraj1
entp=wctm.get_path_entropy_2point(x0v,x1v,exclude_stays=True)
entpSet[i]=entp
sys.stdout.write('mean entp across treatments: '+str(np.mean(entpSet))+'\n')
#np.savetxt('entpSet_UMAP_trajl'+str(trajl)+'_ndim'+str(neigen)+'_nc'+str(n_clusters)+'_19feb21.dat',entpSet)
for i in range(nmodels):
modelSet[i].trajectories=all_trajSet[i].copy()
import scipy
knn=200
dxSet=np.zeros((nmodels,n_clusters))
nt=5
xbins=np.arange(nt)*.5
xbins_spl=np.linspace(xbins[0],xbins[-1],100)
clusters=wctm.clusterst
for i in range(nmodels):
for iclust in range(n_clusters):
xc=np.array([clusters.clustercenters[iclust,:]])
dmatr=wctm.get_dmat(modelSet[i].Xtraj,xc) #get closest cells to cluster center
indr=np.argsort(dmatr[:,0])
indr=indr[0:knn]
cellindsr=modelSet[i].traj[indr,-1]
modelSet[i].get_unique_trajectories(cell_inds=cellindsr)
try:
dxcorr=modelSet[i].get_dx_tcf(trajectories=modelSet[i].trajectories)
except:
dxcorr=np.ones(nt)*np.nan
if dxcorr.size<nt:
dxcorr_r=np.ones(nt)*np.nan
dxcorr_r[0:dxcorr.size]=dxcorr
dxcorr=dxcorr_r
spl=scipy.interpolate.interp1d(xbins,dxcorr[0:nt])
dxcorr_spl=spl(xbins_spl)
dxSet[i,iclust]=np.trapz(dxcorr_spl/dxcorr_spl[0],x=xbins_spl) #np.sum(dxcorr[0:4]) #np.mean(dr[inds_xr])
#stdxSet[iclust,icond]=np.std(dr[inds_xr])
tmSet=['PBS','EGF','HGF','OSM','BMP2','IFNG','TGFB'] # treatment labels; defined before their first use in the subplot titles below
plt.figure(figsize=(7,12))
nbins=10
plt.subplot(4,2,1)
vdist1,xedges1,yedges1=np.histogram2d(clusters.clustercenters[:,0],clusters.clustercenters[:,1],bins=nbins,weights=np.mean(dxSet,axis=0))
norm1,xedges1,yedges1=np.histogram2d(clusters.clustercenters[:,0],clusters.clustercenters[:,1],bins=[xedges1,yedges1])
vdist1=np.divide(vdist1,norm1)
indnan=np.where(np.isnan(vdist1))
indgood=np.where(np.logical_not(np.isnan(vdist1)))
xedges1c=.5*(xedges1[1:]+xedges1[0:-1])
yedges1c=.5*(yedges1[1:]+yedges1[0:-1])
xx,yy=np.meshgrid(xedges1c,yedges1c)
#levels=np.linspace(np.min(vdist1[indgood]),np.max(vdist1[indgood]),20)
levels=np.linspace(.1,.55,20)
plt.contourf(xx,yy,vdist1.T,cmap=plt.cm.jet,levels=levels)
cbar=plt.colorbar()
cbar.set_label('repolarization time (hrs)')
plt.title('average')
plt.pause(.1)
plt.axis('off')
for i in range(nmodels):
plt.subplot(4,2,i+2)
vdist1,xedges1,yedges1=np.histogram2d(clusters.clustercenters[:,0],clusters.clustercenters[:,1],bins=nbins,weights=dxSet[i,:])
norm1,xedges1,yedges1=np.histogram2d(clusters.clustercenters[:,0],clusters.clustercenters[:,1],bins=[xedges1,yedges1])
vdist1=np.divide(vdist1,norm1)
indnan=np.where(np.isnan(vdist1))
indgood=np.where(np.logical_not(np.isnan(vdist1)))
xedges1c=.5*(xedges1[1:]+xedges1[0:-1])
yedges1c=.5*(yedges1[1:]+yedges1[0:-1])
xx,yy=np.meshgrid(xedges1c,yedges1c)
plt.contourf(xx,yy,vdist1.T,cmap=plt.cm.jet,levels=levels)
#plt.xlabel('UMAP 1')
#plt.ylabel('UMAP 2')
plt.axis('off')
plt.title(tmSet[i])
plt.pause(.1)
plt.savefig('mcf10a_repolarization_24feb21.png')
knn=50
n_clusters=200
wctm.cluster_trajectories(n_clusters,x=x)
clusters=wctm.clusterst
dxs=np.zeros((nmodels,n_clusters,2))
for i in range(nmodels):
indstm=np.where(indtreatment_traj==i)[0]
modelSet[i].Xtraj=x[indstm,0:neigen]
indstm_model=indstm-np.min(indstm) #index in model
modelSet[i].get_trajectory_steps(inds=None,get_trajectories=False,traj=modelSet[i].traj[indstm_model,:],Xtraj=modelSet[i].Xtraj[indstm_model,:])
x0=modelSet[i].Xtraj0
x1=modelSet[i].Xtraj1
dx=x1-x0
for iclust in range(n_clusters):
xc=np.array([clusters.clustercenters[iclust,:]])
dmatr=wctm.get_dmat(modelSet[i].Xtraj[modelSet[i].inds_trajp1[:,-1],:],xc) #get closest cells to cluster center
indr=np.argsort(dmatr[:,0])
indr=indr[0:knn]
cellindsr=modelSet[i].traj[[modelSet[i].inds_trajp1[indr,-1]],-1]
dxs[i,iclust,:]=np.mean(dx[indr,:],axis=0)
tmSet=['PBS','EGF','HGF','OSM','BMP2','IFNG','TGFB']
nbins=15
fl=12
fu=96 #frames for time window
indstw=np.where(np.logical_and(indframes_traj[indst]<fu,indframes_traj[indst]>fl))[0]
probSet=[None]*nmodels
plt.subplot(4,2,1)
prob1,xedges1,yedges1=np.histogram2d(x[indstw,0],x[indstw,1],bins=nbins,density=True)
xx,yy=np.meshgrid(xedges1[1:],yedges1[1:])
#prob1=prob1/np.sum(prob1)
#levels=np.linspace(0,.09,100)
levels=np.linspace(0,np.max(prob1),100)
#levels=np.append(levels,1.)
cs=plt.contourf(xx,yy,prob1.T,levels=levels,cmap=plt.cm.jet)
#plt.clim(0,0.03)
cs.cmap.set_over('darkred')
cbar1=plt.colorbar()
cbar1.set_label('prob density')
plt.title('combined')
plt.axis('off')
for imf in range(nmodels):
tm=modelList[imf][0:4]
indstm=np.where(indtreatment_traj==imf)[0]
indstwm=np.intersect1d(indstm,indstw)
prob,xedges2,yedges2=np.histogram2d(x[indstwm,0],x[indstwm,1],bins=[xedges1,yedges1],density=True)
#prob=prob/np.sum(prob)
probSet[imf]=prob.copy()
plt.subplot(4,2,imf+2)
#levels=np.linspace(0,np.max(prob),100)
cs=plt.contourf(xx,yy,prob.T,levels=levels,cmap=plt.cm.jet,extend='both')
#plt.clim(0,0.03)
cs.cmap.set_over('darkred')
plt.axis('off')
plt.pause(.1)
dxsav=np.mean(dxs,axis=0)
plt.subplot(4,2,1)
plt.title('average')
plt.axis('off')
for ic in range(n_clusters):
ax=plt.gca()
ax.arrow(clusters.clustercenters[ic,0],clusters.clustercenters[ic,1],dxsav[ic,0],dxsav[ic,1],head_width=.1,linewidth=.5,color='white',alpha=1.0)
for i in range(nmodels):
plt.subplot(4,2,i+2)
ax=plt.gca()
for ic in range(n_clusters):
ax.arrow(clusters.clustercenters[ic,0],clusters.clustercenters[ic,1],dxs[i,ic,0],dxs[i,ic,1],head_width=.1,linewidth=.5,color='white',alpha=1.0)
#plt.xlabel('UMAP 1')
#plt.ylabel('UMAP 2')
plt.axis('off')
#plt.title(tmSet[i])
plt.pause(.1)
plt.savefig('mcf10a_prob_flows_24feb21.png')
plt.figure(figsize=(8,6))
nbins=15
fl=12
fu=96 #frames for time window
indstw=np.where(np.logical_and(indframes_traj[indst]<fu,indframes_traj[indst]>fl))[0]
probSet=[None]*nmodels
prob1,xedges1,yedges1=np.histogram2d(x[indstw,0],x[indstw,1],bins=nbins,density=True)
xx,yy=np.meshgrid(xedges1[1:],yedges1[1:])
levels=np.linspace(0,np.max(prob1),100)
for imf in range(nmodels):
indstm=np.where(indtreatment_traj==imf)[0]
indstwm=np.intersect1d(indstm,indstw)
prob,xedges2,yedges2=np.histogram2d(x[indstwm,0],x[indstwm,1],bins=[xedges1,yedges1],density=True)
cs=plt.contourf(xx,yy,prob.T,levels=levels,cmap=plt.cm.jet,extend='both')
#plt.clim(0,0.03)
cs.cmap.set_over('darkred')
#cbar1=plt.colorbar()
#cbar1.set_label('prob density')
plt.axis('off')
plt.pause(.1)
ax=plt.gca()
for ic in range(n_clusters):
ax.arrow(clusters.clustercenters[ic,0],clusters.clustercenters[ic,1],dxs[imf,ic,0],dxs[imf,ic,1],head_width=.1,linewidth=.5,color='white',alpha=1.0)
plt.pause(1)
plt.savefig(tmSet[imf]+'_probflows_tl8_2mar21.png')
plt.clf()
#plot with flows and trajectories
nbins=15
fl=12
fu=96 #frames for time window
indstw=np.where(np.logical_and(indframes_traj[indst]<fu,indframes_traj[indst]>fl))[0]
probSet=[None]*nmodels
prob1,xedges1,yedges1=np.histogram2d(x[indstw,0],x[indstw,1],bins=nbins,density=True)
xx,yy=np.meshgrid(xedges1[1:],yedges1[1:])
levels=np.linspace(0,np.max(prob1),100)
nt=10
minl=24
ctrajSet=[48523,23315,48696,32932,18054,41460,20248]
for imf in [4]: #range(nmodels-1,-1,-1):
modelSet[imf].visual=True
indstm=np.where(indtreatment_traj==imf)[0]
indstwm=np.intersect1d(indstm,indstw)
traj_lengths=np.array([])
for itraj in range(len(modelSet[imf].trajectories)):
traj_lengths=np.append(traj_lengths,modelSet[imf].trajectories[itraj].size)
indtrajs=np.where(traj_lengths>=minl)[0]
#indr=np.random.choice(indtrajs.size,nt,replace=False)
indr=np.arange(indtrajs.size-1,-1,-1).astype(int)
for itrajr in indr:
cell_traj=modelSet[imf].trajectories[indtrajs[itrajr]]
if cell_traj[-1]==ctrajSet[imf]:
plt.figure(figsize=(8,6))
xt,inds_traj=get_Xtraj_celltrajectory(modelSet[imf],cell_traj,Xtraj=None,traj=None)
prob,xedges2,yedges2=np.histogram2d(x[indstwm,0],x[indstwm,1],bins=[xedges1,yedges1],density=True)
cs=plt.contourf(xx,yy,prob.T,levels=levels,cmap=plt.cm.Greys,extend='both')
#plt.clim(0,0.03)
cs.cmap.set_over('black')
#cbar1=plt.colorbar()
#cbar1.set_label('prob density')
plt.axis('off')
ax=plt.gca()
for ic in range(n_clusters):
ax.arrow(clusters.clustercenters[ic,0],clusters.clustercenters[ic,1],dxs[imf,ic,0],dxs[imf,ic,1],head_width=.1,linewidth=.5,color='goldenrod',alpha=1.0) #.2,.75
for itt in range(xt.shape[0]-1):
t=modelSet[imf].cells_frameSet[cell_traj[itt+trajl-1]]*.5
ax.arrow(xt[itt,0],xt[itt,1],xt[itt+1,0]-xt[itt,0],xt[itt+1,1]-xt[itt,1],head_width=.2,linewidth=1.0,color=plt.cm.winter(1.*itt/xt.shape[0]),alpha=1.0) #.4,1.5
t0=modelSet[imf].cells_frameSet[cell_traj[0]]*.5
tf=modelSet[imf].cells_frameSet[cell_traj[-1]]*.5
plt.title('t0='+str(t0)+' tf='+str(tf))
plt.pause(1)
plt.savefig(tmSet[imf]+'_probflows_tl8_c'+str(cell_traj[-1])+'_4mar21.png')
plt.close()
show_cells(modelSet[imf],cell_traj)
plt.savefig(tmSet[imf]+'_celltraj_tl8_c'+str(cell_traj[-1])+'_4mar21.png')
plt.close()
def get_Xtraj_celltrajectory(self,cell_traj,Xtraj=None,traj=None): #traj and
if traj is None:
traj=self.traj
if Xtraj is None:
x=self.Xtraj
else:
x=Xtraj
ntraj=cell_traj.size
neigen=x.shape[1]
xt=np.zeros((0,neigen))
inds_traj=np.array([])
for itraj in range(ntraj-self.trajl):
test=cell_traj[itraj:itraj+self.trajl] # use the instance's trajl rather than the global
res = (traj[:, None] == test[np.newaxis,:]).all(-1).any(-1)
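# (added note) the broadcasted comparison above matches the length-trajl
# window `test` against every row of `traj`: traj[:, None] == test has shape
# (n_rows, 1, trajl), .all(-1) requires a full row match and .any(-1)
# collapses to one boolean per row, e.g. traj = [[1, 2], [3, 4]] and
# test = [3, 4] give res = [False, True].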
if np.sum(res)==1:
indt=np.where(res)[0][0]
xt=np.append(xt,np.array([x[indt,:]]),axis=0)
inds_traj=np.append(inds_traj,indt)
return xt,inds_traj.astype(int)
def show_cells(self,cell_inds,show_segs=False):
if self.visual:
ncells=cell_inds.size
nb=int(np.ceil(np.sqrt(ncells)))
fig, ax = plt.subplots(nrows=nb, ncols=nb, figsize=(12, 16), sharex='all', sharey='all')
#plt.figure(figsize=(12,16))
#fig,ax=plt.subplots(nrows=nb,ncols=2,sharex='all',sharey='all')
inds=np.arange(nb*nb).astype(int)
inds2d=np.unravel_index(inds,(nb,nb))
inds2d1b=inds2d[1].reshape(nb,nb)
for ir in range(1,nb,2):
inds2d1b[ir]=np.flip(inds2d1b[ir])
inds2d=(inds2d[0],inds2d1b.flatten())
for ic in range(nb*nb):
if ic<ncells:
self.get_cellborder_images(indcells=np.array([cell_inds[ic]]),bordersize=40)
imgcell=self.cellborder_imgs[0]
mskcell=self.cellborder_msks[0]
fmskcell=self.cellborder_fmsks[0]
ccborder,csborder=self.get_cc_cs_border(mskcell,fmskcell)
img_fg=ax[inds2d[0][ic],inds2d[1][ic]].imshow(np.ma.masked_where(fmskcell == 0, imgcell),cmap=plt.cm.seismic,clim=(-10,10),alpha=1.0)
img_bg=ax[inds2d[0][ic],inds2d[1][ic]].imshow(np.ma.masked_where(fmskcell == 1, imgcell),cmap=plt.cm.gray,clim=(-10,10),alpha=0.6)
nx=imgcell.shape[0]; ny=imgcell.shape[1]
xx,yy=np.meshgrid(np.arange(nx),np.arange(ny),indexing='ij')
cmskx=np.sum(np.multiply(xx,mskcell))/np.sum(mskcell)
cmsky=np.sum(np.multiply(yy,mskcell))/np.sum(mskcell)
if show_segs:
scatter_cc=ax[inds2d[0][ic],inds2d[1][ic]].scatter(np.where(ccborder)[1],np.where(ccborder)[0],s=4,c='purple',marker='s',alpha=0.2)
scatter_cs=ax[inds2d[0][ic],inds2d[1][ic]].scatter(np.where(csborder)[1],np.where(csborder)[0],s=4,c='green',marker='s',alpha=0.2)
else:
scatter_x=ax[inds2d[0][ic],inds2d[1][ic]].scatter(cmsky,cmskx,s=500,color='black',marker='x',alpha=0.2)
ax[inds2d[0][ic],inds2d[1][ic]].axis('off')
else:
ax[inds2d[0][ic],inds2d[1][ic]].axis('off')
plt.tight_layout()
plt.pause(1)
else:
sys.stdout.write('not in visual mode...\n')
#2D cdf
plt.figure(figsize=(7,12))
nbins=15
fl=12
fu=96 #frames for time window
indstw=np.where(np.logical_and(indframes_traj[indst]<fu,indframes_traj[indst]>fl))[0]
probSet=[None]*nmodels
plt.subplot(4,2,1)
prob1,xedges1,yedges1=np.histogram2d(x[indstw,0],x[indstw,1],bins=nbins,density=True)
xx,yy=np.meshgrid(xedges1[1:],yedges1[1:])
#prob1=prob1/np.sum(prob1)
levels=np.linspace(0,1,11)
level=np.array([.66,.99])
prob1=prob1/np.sum(prob1)
prob1=prob1.flatten()
indprob1=np.argsort(prob1)
probc1=np.zeros_like(prob1)
probc1[indprob1]=np.cumsum(prob1[indprob1])
probc1=probc1.reshape((nbins,nbins))
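# (added note) probc1 maps each bin to the total mass held by bins no denser
# than itself, so its contours delineate highest-density regions: bins with
# probc1 > 0.66 are the densest ones holding the top 34% of the mass.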
#levels=np.append(levels,1.)
cs=plt.contourf(xx,yy,probc1.T,levels=levels,cmap=plt.cm.jet)
#plt.clim(0,0.03)
#cs.cmap.set_over('darkred')
cbar1=plt.colorbar()
cbar1.set_label('cumulative probability')
plt.title('combined')
plt.axis('off')
for imf in range(nmodels):
tm=modelList[imf][0:4]
indstm=np.where(indtreatment_traj==imf)[0]
indstwm=np.intersect1d(indstm,indstw)
prob,xedges2,yedges2=np.histogram2d(x[indstwm,0],x[indstwm,1],bins=[xedges1,yedges1],density=True)
probSet[imf]=prob.copy()
prob=prob/np.sum(prob)
prob=prob.flatten()
indprob=np.argsort(prob)
probc=np.zeros_like(prob)
probc[indprob]=np.cumsum(prob[indprob])
probc=probc.reshape((nbins,nbins))
plt.subplot(4,2,imf+2)
#levels=np.linspace(0,np.max(prob),100)
cs=plt.contour(xx,yy,probc.T,levels=levels,cmap=plt.cm.jet) #colors=[plt.cm.jet(1.*imf/nmodels)],linewidths=2)
csf=plt.contourf(xx,yy,probc.T,levels=levels,cmap=plt.cm.jet) #colors=[plt.cm.jet(1.*imf/nmodels)],alpha=0.3) #cmap=plt.cm.jet,extend='both')
plt.axis('off')
plt.title(tmSet[imf])
#plt.subplot(8,1,1)
#cs=plt.contour(xx,yy,probc.T,levels=level,colors=[plt.cm.jet(1.*imf/nmodels)],linewidths=2)
#csf=plt.contourf(xx,yy,probc.T,levels=level,colors=[plt.cm.jet(1.*imf/nmodels)],alpha=0.3)
#xmax=xx.flatten()[np.argsort(probSet[imf].T.flatten())[-1]]
#ymax=yy.flatten()[np.argsort(probSet[imf].T.flatten())[-1]]
#plt.scatter(xmax,ymax,s=1000,color=plt.cm.jet(1.*imf/nmodels),marker='x')
#csf=plt.contourf(xx,yy,probc.T,levels=levels,cmap=plt.cm.jet) #colors=[plt.cm.jet(1.*imf/nmodels)],alpha=0.3) #cmap=plt.cm.jet,extend='both')
#plt.clim(0,0.03)
#cs.cmap.set_over('darkred')
#plt.axis('off')
plt.pause(.1)
plt.savefig('mcf10a_cdist_trajl8_2mar21.png')
plt.figure()
for imf in range(nmodels):
indstm=np.where(indtreatment_traj==imf)[0]
indstwm=np.intersect1d(indstm,indstw)
prob,xedges2,yedges2=np.histogram2d(x[indstwm,0],x[indstwm,1],bins=[xedges1,yedges1],density=True)
probSet[imf]=prob.copy()
prob=prob/np.sum(prob)
prob=prob.flatten()
indprob=np.argsort(prob)
probc=np.zeros_like(prob)
probc[indprob]=np.cumsum(prob[indprob])
probc=probc.reshape((nbins,nbins))
cs=plt.contour(xx,yy,probc.T,levels=level,colors=[plt.cm.jet(1.*imf/nmodels)],linewidths=2)
csf=plt.contourf(xx,yy,probc.T,levels=level,colors=[plt.cm.jet(1.*imf/nmodels)],alpha=0.3)
xmax=xx.flatten()[np.argsort(probSet[imf].T.flatten())[-1]]
ymax=yy.flatten()[np.argsort(probSet[imf].T.flatten())[-1]]
plt.scatter(xmax,ymax,s=1000,color=plt.cm.jet(1.*imf/nmodels),marker='x')
#csf=plt.contourf(xx,yy,probc.T,levels=levels,cmap=plt.cm.jet) #colors=[plt.cm.jet(1.*imf/nmodels)],alpha=0.3) #cmap=plt.cm.jet,extend='both')
#plt.clim(0,0.03)
#cs.cmap.set_over('darkred')
plt.axis('off')
plt.pause(.1)
plt.savefig('mcf10a_trajl8_cdfl1_2mar21.png')
plt.clf()
for imf in range(nmodels):
indstm=np.where(indtreatment_traj==imf)[0]
indstwm=np.intersect1d(indstm,indstw)
prob,xedges2,yedges2=np.histogram2d(x[indstwm,0],x[indstwm,1],bins=[xedges1,yedges1],density=True)
prob=prob/np.sum(prob)
probSet[imf]=prob.copy()
poverlapMatrix=np.zeros((nmodels,nmodels))
for i in range(nmodels):
for j in range(nmodels):
probmin=np.minimum(probSet[i],probSet[j])
poverlapMatrix[i,j]=np.sum(probmin)
ax=plt.gca()
avoverlap=np.mean(poverlapMatrix[np.triu_indices(nmodels,1)])
plt.imshow(poverlapMatrix,cmap=plt.cm.jet)
plt.clim(0,1)
cbar=plt.colorbar()
cbar.set_label('overlap '+r'$\sum min(p1,p2)$')
# We want to show all ticks...
ax.set_xticks(np.arange(len(tmSet)))
ax.set_yticks(np.arange(len(tmSet)))
ax.set_xticklabels(tmSet)
ax.set_yticklabels(tmSet)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",rotation_mode="anchor")
tstr='mean overlap %.2f' % avoverlap
ax.set_title(tstr)
plt.pause(.1)
plt.savefig('mcf10a_poverlap_trajl1_2mar21.png')
npm=3
nimp=6
magdx=1.0;magdy=1.0
for i in range(nmodels):
modelSet[i].visual=True
pts=np.zeros((0,2))
xset=xx.flatten()[np.argsort(probSet[i].T.flatten())[-npm:]]
yset=yy.flatten()[np.argsort(probSet[i].T.flatten())[-npm:]]
dxset=magdx*(np.random.rand(nimp)-.5)
dyset=magdy*(np.random.rand(nimp)-.5)
for ix in range(npm):
dxset=magdx*(np.random.rand(nimp)-.5)
dyset=magdy*(np.random.rand(nimp)-.5)
pts=np.append(pts,np.array([xset[ix]+dxset,yset[ix]+dyset]).T,axis=0)
pathto='24feb21/'+tmSet[i]
cmd='mkdir '+pathto
os.system(cmd)
explore_2D_celltraj_nn(modelSet[i],modelSet[i].Xtraj,modelSet[i].traj,pathto=pathto,coordlabel='UMAP',show_segs=False,pts=pts)
xset=np.zeros(1)
yset=np.zeros(1)
for i in range(nmodels):
modelSet[i].visual=True
pts=np.zeros((0,2))
x=xx.flatten()[np.argsort(probSet[i].T.flatten())[-1]]
y=yy.flatten()[np.argsort(probSet[i].T.flatten())[-1]]
xset=np.append(xset,x)
yset=np.append(yset,y)
nimp=4
magdx=.1;magdy=.1
xset[0]=8.3
yset[0]=-3.35
npm=xset.size
for i in range(nmodels):
modelSet[i].visual=True
pts=np.zeros((0,2))
dxset=magdx*(np.random.rand(nimp)-.5)
dyset=magdy*(np.random.rand(nimp)-.5)
for ix in range(npm):
dxset=magdx*(np.random.rand(nimp)-.5)
dyset=magdy*(np.random.rand(nimp)-.5)
pts=np.append(pts,np.array([xset[ix]+dxset,yset[ix]+dyset]).T,axis=0)
pathto='24feb21/'+tmSet[i]+'_match_'
#cmd='mkdir '+pathto
#os.system(cmd)
explore_2D_celltraj_nn(modelSet[i],modelSet[i].Xtraj,modelSet[i].traj,pathto=pathto,coordlabel='UMAP',show_segs=False,pts=pts)
def explore_2D_celltraj_nn(self,x,traj,pts=None,npts=20,dm1=None,dm2=None,pathto='./',coordlabel='coord',show_segs=True):
if self.visual:
plt.figure(figsize=(10,4))
ipath=0
trajl=traj.shape[1]
if dm1 is None:
dm1=0
dm2=1
indx=np.array([dm1,dm2]).astype(int)
plt.subplot(1,1+trajl,1)
scatter_x=plt.scatter(x[:,dm1],x[:,dm2],s=5,c='black')
plt.title('choose '+str(npts)+' points')
plt.pause(.1)
if pts is None:
pts = np.asarray(plt.ginput(npts, timeout=-1))
else:
npts=pts.shape[0]
#xc=np.array([x[traj[:,0],dm1],x[traj[:,0],dm2]]).T
dmat=self.get_dmat(x,pts)
dmat[np.where(np.logical_or(np.isnan(dmat),np.isinf(dmat)))]=np.inf
ind_nn=np.zeros(npts)
for ip in range(npts):
ind_nn[ip]=np.argmin(dmat[:,ip])
ind_nn=ind_nn.astype(int)
ptSet=np.zeros((0,2))
plt.clf()
for ipts in range(npts):
plt.subplot(1,1+trajl,1)
scatter_x=plt.scatter(x[:,dm1],x[:,dm2],s=5,c='black')
plt.scatter(pts[ipts,0],pts[ipts,1],s=50,c='red')
plt.xlabel(coordlabel+' '+str(dm1+1))
plt.ylabel(coordlabel+' '+str(dm2+1))
traj_it=traj[ind_nn[ipts],:]
for il in range(trajl):
ax2=plt.subplot(1,1+trajl,il+2)
self.get_cellborder_images(indcells=np.array([traj_it[il]]),bordersize=40)
imgcell=self.cellborder_imgs[0]
mskcell=self.cellborder_msks[0]
fmskcell=self.cellborder_fmsks[0]
ccborder,csborder=self.get_cc_cs_border(mskcell,fmskcell)
img_fg=plt.imshow(np.ma.masked_where(fmskcell == 0, imgcell),cmap=plt.cm.seismic,clim=(-10,10),alpha=1.0)
img_bg=plt.imshow(np.ma.masked_where(fmskcell == 1, imgcell),cmap=plt.cm.gray,clim=(-10,10),alpha=0.6)
nx=imgcell.shape[0]; ny=imgcell.shape[1]
xx,yy=np.meshgrid(np.arange(nx),np.arange(ny),indexing='ij')
cmskx=np.sum(np.multiply(xx,mskcell))/np.sum(mskcell)
cmsky=np.sum(np.multiply(yy,mskcell))/np.sum(mskcell)
if show_segs:
scatter_cc=plt.scatter(np.where(ccborder)[1],np.where(ccborder)[0],s=4,c='purple',marker='s',alpha=0.2)
scatter_cs=plt.scatter(np.where(csborder)[1],np.where(csborder)[0],s=4,c='green',marker='s',alpha=0.2)
import argparse
import os
import random
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import yaml
from addict import Dict
from sklearn.metrics import auc, roc_curve
from torch.utils.data import DataLoader
from libs.checkpoint import resume
from libs.dataset import ShapeNeth5pyDataset
from libs.emd.emd_module import emdModule
from libs.foldingnet import FoldingNet, SkipFoldingNet, SkipValiationalFoldingNet
from libs.loss import ChamferLoss
def get_parameters():
"""
make parser to get parameters
"""
parser = argparse.ArgumentParser(description="take config file path")
parser.add_argument("config", type=str, help="path of a config file for testing")
parser.add_argument(
"--checkpoint_path",
type=str,
help="path of the file where the weight is saved",
)
parser.add_argument(
"-c",
"--chamfer",
action="store_true",
help="Whether to add a chamfer score or not",
)
parser.add_argument(
"-e",
"--emd",
action="store_true",
help="Whether to add a emd score or not",
)
parser.add_argument(
"-k",
"--kldiv",
action="store_true",
help="Whether to add a kldiv score or not",
)
parser.add_argument(
"-f",
"--feature_diff",
action="store_true",
help="Whether to add a feature diff score or not",
)
parser.add_argument(
"--histgram",
action="store_true",
help="Visualize histgram or not",
)
parser.add_argument(
"--save_points",
action="store_true",
help="Save points or not",
)
return parser.parse_args()
def rescale(input):
input = np.array(input, dtype=float)
_min = np.array(min(input))
_max = np.array(max(input))
with np.errstate(invalid="ignore"):
re_scaled = (input - _min) / (_max - _min)
return re_scaled
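# (added note) rescale() is plain min-max normalisation, e.g.
# rescale([2, 4, 6]) -> array([0., 0.5, 1.]); when all inputs are equal it
# returns NaN (0/0), which the errstate guard above silences.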
def vis_histgram(label: List, result: List, save_name: str) -> None:
normal_result = []
abnormal_result = []
for lbl, r in zip(label, result):
if lbl == 0:
normal_result.append(r * 1000)
else:
abnormal_result.append(r * 1000)
bin_max = max(max(normal_result), max(abnormal_result))
bins = np.linspace(0, bin_max, 100)
plt.hist(normal_result, bins, alpha=0.5, label="normal")
plt.hist(abnormal_result, bins, alpha=0.5, label="abnormal")
plt.xlabel("Anomaly Score")
plt.ylabel("Number of samples")
plt.legend(loc="upper right")
plt.savefig(save_name)
plt.close()
def main():
# fix the random seeds for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
set_seed(0)
args = get_parameters()
# configuration
with open(args.config, "r") as f:
config_dict = yaml.safe_load(f)
CONFIG = Dict(config_dict)
print(config_dict)
torch.autograd.set_detect_anomaly(True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
test_dataset = ShapeNeth5pyDataset(
root_path=CONFIG.root_path,
split="test",
normal_class=CONFIG.normal_class,
abnormal_class=CONFIG.abnormal_class,
n_point=CONFIG.n_points,
random_rotate=False,
random_jitter=False,
random_translate=False,
)
test_dataloader = DataLoader(
test_dataset, batch_size=CONFIG.test_batch_size, shuffle=False
)
if CONFIG.model == "FoldingNet":
model = FoldingNet(CONFIG.n_points, CONFIG.feat_dims, CONFIG.shape)
elif CONFIG.model == "SkipFoldingNet":
model = SkipFoldingNet(CONFIG.n_points, CONFIG.feat_dims, CONFIG.shape)
elif CONFIG.model == "SkipVariationalFoldingNet":
model = SkipValiationalFoldingNet(
CONFIG.n_points, CONFIG.feat_dims, CONFIG.shape
)
model.to(device)
lr = 0.0001 * 16 / CONFIG.batch_size
beta1, beta2 = 0.9, 0.999
optimizer = torch.optim.Adam(
model.parameters(), lr, [beta1, beta2], weight_decay=1e-6
)
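# (added note) the learning rate is anchored at 1e-4 for batch size 16 and
# scaled by 16 / CONFIG.batch_size, so larger batches take smaller steps here.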
epoch, model, optimizer = resume(args.checkpoint_path, model, optimizer)
print(f"---------- Start testing for epoch{epoch} ----------")
model.eval()
pred = []
labels = [""] * len(test_dataloader.dataset)
names = [""] * len(test_dataloader.dataset)
n = 0
chamferloss = ChamferLoss()
emd_loss = emdModule()
chamfer_scores = []
emd_scores = []
kldiv_scores = []
feature_diff_scores = []
for samples in test_dataloader:
data = samples["data"].float()
label = samples["label"]
name = samples["name"]
mini_batch_size = data.size()[0]
data = data.to(device)
if CONFIG.model == "SkipVariationalFoldingNet":
with torch.no_grad():
output, folding1, mu, log_var = model(data)
if args.kldiv or args.feature_diff:
_, _, fake_mu, fake_log_var = model(output)
if args.chamfer:
for d, o in zip(data, output):
d = d.reshape(1, 2048, -1)
o = o.reshape(1, 2048, -1)
cl = chamferloss(d, o)
chamfer_scores.append(cl)
else:
for _ in range(mini_batch_size):
chamfer_scores.append(0)
if args.emd:
for d, o in zip(data, output):
d = d.reshape(1, 2048, -1)
o = o.reshape(1, 2048, -1)
el, _ = emd_loss(d, o, 0.005, 50)
el = torch.sqrt(el).mean(1)
emd_scores.append(el)
else:
for _ in range(mini_batch_size):
emd_scores.append(0)
if args.kldiv:
for m, l in zip(mu, log_var):
kldiv = torch.mean(
-0.5 * torch.sum(1 + l - m ** 2 - l.exp(), dim=1), dim=0
)
# kldiv = torch.mean(
# 0.5
# * torch.sum(
# m ** 2 + l ** 2 - torch.log(l ** 2 + 1e-12) - 1, dim=1
# ),
# dim=0,
# )
kldiv_scores.append(kldiv)
# for m, l, fm, fl in zip(mu, log_var, fake_mu, fake_log_var):
# P = torch.distributions.Normal(m, l)
# Q = torch.distributions.Normal(fm, fl)
# # kld_loss = torch.distributions.kl_divergence(G, P).mean()
# kldiv = torch.distributions.kl_divergence(P, Q).mean()
# # kldiv = torch.mean(
# # -0.5 * torch.sum(1 + l - m ** 2 - l.exp(), dim=1), dim=0
# # )
# kldiv_scores.append(kldiv)
else:
for _ in range(mini_batch_size):
kldiv_scores.append(0)
if args.feature_diff:
for m, l, fm, fl in zip(mu, log_var, fake_mu, fake_log_var):
std = torch.exp(0.5 * l)
eps = torch.randn_like(std)
feat = eps * std + m
fake_std = torch.exp(0.5 * fl)
fake_eps = torch.randn_like(fake_std)
fake_feat = fake_eps * fake_std + fm
diff_feat = feat - fake_feat
diff_feat = diff_feat.reshape(-1)
feature_diff_score = np.mean(
np.power(diff_feat.to("cpu").numpy(), 2.0)
)
feature_diff_scores.append(feature_diff_score)
else:
for _ in range(mini_batch_size):
feature_diff_scores.append(0)
if args.save_points:
for i in range(mini_batch_size):
o = output[i]
d = data[i]
d = d.reshape(1, 2048, -1)
o = o.reshape(1, 2048, -1)
d = d.to("cpu").numpy()
o = o.to("cpu").numpy()
if not os.path.exists("./original"):
os.makedirs("./original")
if not os.path.exists("./reconstructed"):
os.makedirs("./reconstructed")
np.save(f"./original/{n+i}.npy", d)
np.save(f"./reconstructed/{n+i}.npy", o)
labels[n : n + mini_batch_size] = label.reshape(mini_batch_size)
names[n : n + mini_batch_size] = name
n += mini_batch_size
if args.chamfer:
chamfer_scores = rescale(chamfer_scores)
if args.emd:
emd_scores = rescale(emd_scores)
if args.kldiv:
kldiv_scores = rescale(kldiv_scores)
if args.feature_diff:
feature_diff_scores = rescale(feature_diff_scores)
for chamfer_score, emd_score, kldiv_score, feature_diff_score in zip(
chamfer_scores, emd_scores, kldiv_scores, feature_diff_scores
):
score = chamfer_score + emd_score + kldiv_score + feature_diff_score
pred.append(score)
pred = np.array(pred, dtype=float)
_min = np.array(min(pred))
_max = np.array(max(pred))
re_scaled = (pred - _min) / (_max - _min)
# re_scaled = rescale(pred)
re_scaled = np.array(re_scaled, dtype=float)
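# (added note) the original file is truncated here; given the roc_curve/auc
# imports at the top, the remainder presumably scored `re_scaled` against
# `labels`, e.g.:
#   fpr, tpr, _ = roc_curve(labels, re_scaled)
#   print(auc(fpr, tpr))
# and optionally called vis_histgram(labels, pred, ...) when --histgram is set.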
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension
of D, a hidden layer dimension of H, and performs classification over C
classes.
We train the network with a softmax loss function and L2 regularization on
the weight matrices. The network uses a ReLU nonlinearity after the first
fully connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values
and biases are initialized to zero. Weights and biases are stored in
the variable self.params, which is a dictionary with the following keys
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each
y[i] is an integer in the range 0 <= y[i] < C. This parameter is
optional; if it is not passed then we only return scores, and if it
is passed then we instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c]
is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of
training samples.
- grads: Dictionary mapping parameter names to gradients of those
parameters with respect to the loss function; has the same keys as
self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#######################################################################
# TODO: Perform the forward pass, computing the class scores for the #
# input. Store the result in the scores variable, which should be an #
# array of shape (N, C). #
#######################################################################
scores1 = X.dot(W1) + b1 # FC1
X2 = np.maximum(0, scores1)
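# (added note) np.maximum(0, scores1) is the ReLU. The original file is
# truncated here; the standard continuation implied by the docstring would be:
#   scores = X2.dot(W2) + b2   # second fully connected layer, shape (N, C)
#   if y is None:
#       return scores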
import tensorflow as tf
import numpy as np
from config import cfg
def compute_area(xmin, xmax, ymin, ymax):
return ((xmax>xmin)*(xmax-xmin)*(ymax>ymin)*(ymax-ymin)).astype(np.float32)
def bbox_overlaps(boxes, query):
'''
boxes: (N, 4) array
query: (M, 4) array
RETURN: (N, M) array where entry (i, j) is the IoU between boxes[i] and query[j]
'''
bxmin, bxmax = np.reshape(boxes[:,0], [-1,1]), np.reshape(boxes[:,2], [-1,1])
bymin, bymax = np.reshape(boxes[:,1], [-1,1]), np.reshape(boxes[:,3], [-1,1])
qxmin, qxmax = np.reshape(query[:,0], [1,-1]), np.reshape(query[:,2], [1,-1])
qymin, qymax = np.reshape(query[:,1], [1,-1]), np.reshape(query[:,3], [1,-1])
ixmin, ixmax = np.maximum(bxmin, qxmin), np.minimum(bxmax, qxmax)
iymin, iymax = np.maximum(bymin, qymin), np.minimum(bymax, qymax)
intersection = compute_area(ixmin, ixmax, iymin, iymax)
area_boxes = compute_area(bxmin, bxmax, bymin, bymax)
area_query = compute_area(qxmin, qxmax, qymin, qymax)
union = area_boxes + area_query - intersection
overlap = intersection / (union + cfg.eps)
return overlap
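# Illustrative sanity check (added; not part of the original module).
def _demo_bbox_overlaps():
    """A query box identical to the reference gives IoU ~1 and a disjoint one
    gives 0, so the expected result is approximately [[1., 0.]]."""
    boxes = np.array([[0., 0., 1., 1.]])
    query = np.array([[0., 0., 1., 1.], [2., 2., 3., 3.]])
    return bbox_overlaps(boxes, query)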
def minmax2ctrwh(boxes):
widths = np.maximum(0.0, boxes[:,2] - boxes[:,0])
heights = np.maximum(0.0, boxes[:,3] - boxes[:,1])
ctrx = boxes[:,0] + widths * 0.5
ctry = boxes[:,1] + heights * 0.5
return widths, heights, ctrx, ctry
def encode_roi(anchors, boxes):
'''
- anchors: (N, 4) tensors
- boxes: (N, 4) tensors
RETURN
- terms: (N, 4) encoded terms
'''
anc_w, anc_h, anc_ctrx, anc_ctry = minmax2ctrwh(anchors)
box_w, box_h, box_ctrx, box_ctry = minmax2ctrwh(boxes)
tx = (box_ctrx - anc_ctrx) / (anc_w + cfg.eps)
ty = (box_ctry - anc_ctry) / (anc_h + cfg.eps)
tw = np.log(box_w / (anc_w + cfg.eps) + cfg.log_eps)
th = np.log(box_h / (anc_h + cfg.eps) + cfg.log_eps)
return np.stack((tx, ty, tw, th), axis=1)
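# Illustrative sanity check (added; not part of the original module).
def _demo_encode_roi():
    """Encoding a box against itself gives ~zero centre offsets and ~zero
    log-scale terms (up to the eps stabilisers)."""
    box = np.array([[0., 0., 10., 10.]])
    return encode_roi(box, box)  # approx [[0., 0., 0., 0.]]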
def rpn_target_one_batch(anchors, gt_boxes):
'''
Propose rpn_target for one batch
- anchors: (N, 4) array
- gt_boxes: (M, 4) groundtruth boxes
RETURN
- labels: (N,), 1 for positive, 0 for negative, -1 for don't care
- terms: (N, 4), regression terms for each positive anchors
'''
N, M = anchors.shape[0], gt_boxes.shape[0]
iou = bbox_overlaps(gt_boxes, anchors)
max_iou_ind = iou.argmax(axis=1)
max_iou = iou[range(M), max_iou_ind]
max_gt_ind = iou.argmax(axis=0)
max_gt_iou = iou[max_gt_ind, range(N)]
# decide labels
labels = np.zeros(N, np.int32)-1
labels[max_gt_iou < cfg.rpn_negative_iou] = 0 # iou < negative_thresh
labels[max_gt_iou > cfg.rpn_positive_iou] = 1 # iou > postive_thresh
labels[max_iou_ind] = 1 # maximum iou with each groundtruth
# filter out too many positive or negative
pos_inds = np.where(labels == 1)[0]
neg_inds = np.where(labels == 0)[0]
num_pos = int(cfg.rpn_pos_ratio * cfg.rpn_batch_size)
num_neg = cfg.rpn_batch_size - num_pos
if len(pos_inds) > num_pos:
disabled_ind = np.random.choice(pos_inds, size=len(pos_inds)-num_pos, replace=False)
labels[disabled_ind] = -1
if len(neg_inds) > num_neg:
disabled_ind = np.random.choice(neg_inds, size=len(neg_inds)-num_neg, replace=False)
labels[disabled_ind] = -1
# decide regression terms
terms = np.zeros((N,4), np.float32)-1
pos_ind = np.where(labels == 1)[0]
terms[pos_ind] = encode_roi(anchors[pos_ind], gt_boxes[max_gt_ind[pos_ind]])
terms = (terms - cfg.bbox_mean.reshape(1,4)) / cfg.bbox_stddev.reshape(1,4)
terms = terms.astype(np.float32)
#return labels, terms, (np.where(labels==1)[0]).size, (np.where(labels==0)[0]).size, max_gt_iou, max_iou
return labels, terms
def rpn_targets(anchors, gt_boxes):
'''
Return the labels and for all anchors w.r.t each image
- anchors: (N,4) tensor, the
- gt_boxes: a list of (K,2) array, K is the number of gt for each image
RETURN
- out_labels: (M,N) target labels tensor for M image, N anchors
- out_terms: (M,N,4) encoded target terms tensor for M image, N anchors
'''
out_labels, out_terms = [], []
for gt in gt_boxes:
#labels, terms, num_pos_rpn, num_neg_rpn, gt_iou, box_iou = tf.py_func(
# rpn_target_one_batch, [anchors, gt], [tf.int32, tf.float32, tf.int64, tf.int64, tf.float32, tf.float32])
labels, terms = tf.py_func(rpn_target_one_batch, [anchors, gt], [tf.int32, tf.float32])
#################################### DEBUG ##########################################
# num_pos += num_pos_rpn #
# num_neg += num_neg_rpn #
# terms = tf.Print(terms, [tf.convert_to_tensor('max iou'), tf.reduce_max(gt_iou)]) #
# terms = tf.Print(terms, [tf.convert_to_tensor('iou'), gt_iou]) #
# terms = tf.Print(terms, [tf.convert_to_tensor('# gt'), tf.size(box_iou)]) #
# terms = tf.Print(terms, [tf.convert_to_tensor('pos num'), num_pos_rpn]) #
# terms = tf.Print(terms, [tf.convert_to_tensor('neg num'), num_neg_rpn]) #
#####################################################################################
out_labels.append(labels)
out_terms.append(terms)
out_labels, out_terms = tf.stack(out_labels, axis=0), tf.stack(out_terms, axis=0)
out_labels, out_terms = tf.stop_gradient(out_labels), tf.stop_gradient(out_terms)
return out_labels, out_terms
def classifier_target_one_batch(rois, gt_boxes, gt_classes, gt_masks):
'''
Choose foreground and background sample proposals
- rois: (N,4) roi bboxes
- gt_boxes: (M,4) groundtruth boxes
- gt_classes: (M,) class label for each box
- gt_masks: (M,H,W) binary label for each groundtruth instance
RETURN
- sampled_rois: (rois_per_img, 4) sampled rois
- labels: (rois_per_img,) class labels for each foreground, 0 for background
- loc: (fg_per_img, 4) encoded regression targets for each foreground, pad for bg
- mask: (fg_per_img, mask_output_size, mask_output_size, num_classes) class mask
'''
num_rois = cfg.rois_per_img
num_fg = cfg.fg_per_img
num_bg = num_rois - num_fg
iou = bbox_overlaps(rois, gt_boxes)
max_iou_ind = iou.argmax(axis=1)
max_iou = iou[range(iou.shape[0]), max_iou_ind]
fg_inds = np.where(max_iou>cfg.rois_fg_thresh)[0]
bg_inds = np.where((max_iou>=cfg.rois_bg_thresh_low)&(max_iou<=cfg.rois_bg_thresh_high))[0]
fg_inds_ori = fg_inds
bg_inds_ori = bg_inds
if fg_inds.size > 0 and bg_inds.size > 0:
num_fg = min(num_fg, fg_inds.size)
fg_inds = np.random.choice(fg_inds, size=num_fg, replace=False)
num_bg = num_rois - num_fg
bg_inds = np.random.choice(bg_inds, size=num_bg, replace=num_bg>bg_inds.size)
elif fg_inds.size > 0:
fg_inds = np.random.choice(fg_inds, size=num_rois, replace=num_rois>fg_inds.size)
num_fg, num_bg = num_rois, 0
elif bg_inds.size > 0:
bg_inds = np.random.choice(bg_inds, size=num_rois, replace=num_rois>bg_inds.size)
num_fg, num_bg = 0, num_rois
# rois
sampled_rois = rois[np.append(fg_inds, bg_inds), :]
# labels
fg_gt_inds = max_iou_ind[fg_inds]
labels = np.append(gt_classes[fg_gt_inds], np.zeros(num_bg))
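# (added note) the original file is truncated here; per the docstring, the
# remainder presumably encoded regression targets for the foreground rois
# (via encode_roi) and built per-class mask targets from gt_masks before
# returning (sampled_rois, labels, loc, mask).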
# -*- coding: utf-8 -*-
"""
@Time : 2020/05/06 21:09
@Author : Tianxiaomo
@File : dataset.py
@Noice :
@Modificattion :
@Author :
@Time :
@Detail :
"""
import os
import random
import sys
from typing import Tuple
import cv2
import numpy as np
import pandas as pd
import torch
from torch.utils.data.dataset import Dataset
from easydict import EasyDict as edict
import matplotlib.pyplot as plt
def rand_uniform_strong(min, max):
if min > max:
swap = min
min = max
max = swap
return random.random() * (max - min) + min
def rand_scale(s):
scale = rand_uniform_strong(1, s)
if random.randint(0, 1) % 2:
return scale
return 1.0 / scale
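# (added note) rand_scale(s) draws a factor in [1, s] and inverts it half the
# time, so e.g. rand_scale(1.5) yields values in [1/1.5, 1.5]; this is the
# usual darknet-style jitter, typically applied to saturation and exposure.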
def rand_precalc_random(min, max, random_part):
if max < min:
swap = min
min = max
max = swap
return (random_part * (max - min)) + min
def fill_truth_detection(bboxes, num_boxes, classes, flip, dx, dy, sx, sy, net_w, net_h):
if bboxes.shape[0] == 0:
return bboxes, 10000
np.random.shuffle(bboxes)
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
out_box = list(
np.where(
((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy))
| ((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx))
| ((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0))
| ((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0))
)[0]
)
list_box = list(range(bboxes.shape[0]))
for i in out_box:
list_box.remove(i)
bboxes = bboxes[list_box]
if bboxes.shape[0] == 0:
return bboxes, 10000
bboxes = bboxes[np.where((bboxes[:, 4] < classes) & (bboxes[:, 4] >= 0))[0]]
if bboxes.shape[0] > num_boxes:
bboxes = bboxes[:num_boxes]
min_w_h = np.array([bboxes[:, 2] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1]]).min()
bboxes[:, 0] *= net_w / sx
bboxes[:, 2] *= net_w / sx
bboxes[:, 1] *= net_h / sy
bboxes[:, 3] *= net_h / sy
if flip:
temp = net_w - bboxes[:, 0]
bboxes[:, 0] = net_w - bboxes[:, 2]
bboxes[:, 2] = temp
return bboxes, min_w_h
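# Illustrative sketch (added; not part of the original file).
def _demo_fill_truth_detection():
    """One class-0 box fully inside a 100x100 crop survives clipping and is
    rescaled to a 608x608 network input; min_w_h is the smaller box side
    before rescaling (40 here)."""
    boxes = np.array([[10., 10., 50., 50., 0.]])
    out, min_w_h = fill_truth_detection(boxes, num_boxes=60, classes=80,
                                        flip=0, dx=0, dy=0, sx=100, sy=100,
                                        net_w=608, net_h=608)
    return out, min_w_h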
def rect_intersection(a, b):
minx = max(a[0], b[0])
miny = max(a[1], b[1])
maxx = min(a[2], b[2])
maxy = min(a[3], b[3])
return [minx, miny, maxx, maxy]
def image_data_augmentation(
mat, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, truth
):
try:
img = mat
oh, ow, _ = img.shape
pleft, ptop, swidth, sheight = int(pleft), int(ptop), int(swidth), int(sheight)
# crop
src_rect = [pleft, ptop, swidth + pleft, sheight + ptop] # x1,y1,x2,y2
img_rect = [0, 0, ow, oh]
new_src_rect = rect_intersection(src_rect, img_rect) # intersection of the crop and image rectangles
dst_rect = [
max(0, -pleft),
max(0, -ptop),
max(0, -pleft) + new_src_rect[2] - new_src_rect[0],
max(0, -ptop) + new_src_rect[3] - new_src_rect[1],
]
# cv2.Mat sized
if (
src_rect[0] == 0
and src_rect[1] == 0
and src_rect[2] == img.shape[0]
and src_rect[3] == img.shape[1]
):
sized = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
else:
cropped = np.zeros([sheight, swidth, 3])
cropped[
:,
:,
] = np.mean(img, axis=(0, 1))
cropped[dst_rect[1] : dst_rect[3], dst_rect[0] : dst_rect[2]] = img[
new_src_rect[1] : new_src_rect[3], new_src_rect[0] : new_src_rect[2]
]
# resize
sized = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_LINEAR)
# flip
if flip:
# cv2.Mat cropped
sized = cv2.flip(sized, 1) # 0 - x-axis, 1 - y-axis, -1 - both axes (x & y)
# HSV augmentation
# cv2.COLOR_BGR2HSV, cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2BGR, cv2.COLOR_HSV2RGB
if dsat != 1 or dexp != 1 or dhue != 0:
if img.shape[2] >= 3:
hsv_src = cv2.cvtColor(sized.astype(np.float32), cv2.COLOR_RGB2HSV) # RGB to HSV
hsv = cv2.split(hsv_src)
hsv[1] *= dsat
hsv[2] *= dexp
hsv[0] += 179 * dhue
hsv_src = cv2.merge(hsv)
sized = np.clip(
cv2.cvtColor(hsv_src, cv2.COLOR_HSV2RGB), 0, 255
) # HSV to RGB (the same as previous)
else:
sized *= dexp
if blur:
if blur == 1:
dst = cv2.GaussianBlur(sized, (17, 17), 0)
# cv2.bilateralFilter(sized, dst, 17, 75, 75)
else:
ksize = int(blur / 2) * 2 + 1 # Gaussian kernel size must be a positive odd int
dst = cv2.GaussianBlur(sized, (ksize, ksize), 0)
if blur == 1:
img_rect = [0, 0, sized.shape[1], sized.shape[0]] # numpy images expose .shape, not .cols/.rows
for b in truth:
left = (b.x - b.w / 2.0) * sized.shape[1]
width = b.w * sized.shape[1]
top = (b.y - b.h / 2.0) * sized.shape[0]
height = b.h * sized.shape[0]
# clamp the object box to the image and copy it back unblurred,
# so that only the background ends up blurred
roi = rect_intersection([int(left), int(top), int(left + width), int(top + height)], img_rect)
dst[roi[1] : roi[3], roi[0] : roi[2]] = sized[roi[1] : roi[3], roi[0] : roi[2]]
sized = dst
if gaussian_noise:
noise = np.zeros(sized.shape, dtype=np.float32) # buffer that cv2.randn fills in place
gaussian_noise = min(gaussian_noise, 127)
gaussian_noise = max(gaussian_noise, 0)
cv2.randn(noise, 0, gaussian_noise) # mean and variance
sized = sized + noise
except:
print("OpenCV can't augment image: " + str(w) + " x " + str(h))
sized = mat
return sized
def filter_truth(bboxes, dx, dy, sx, sy, xd, yd):
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
"""Automated Rectification of Image.
References
----------
1. Chaudhury, Krishnendu, <NAME>, and <NAME>.
"Auto-rectification of user photos." 2014 IEEE International Conference on
Image Processing (ICIP). IEEE, 2014.
2. Bazin, Jean-Charles, and <NAME>. "3-line RANSAC for orthogonal
vanishing point detection." 2012 IEEE/RSJ International Conference on
Intelligent Robots and Systems. IEEE, 2012.
"""
from skimage import feature, color, transform, io
import numpy as np
import logging
from scipy import ndimage
def compute_edgelets(image, sigma=3):
"""Create edgelets as in the paper.
Uses canny edge detection and then finds (small) lines using probabilstic
hough transform as edgelets.
Parameters
----------
image: ndarray
Image for which edgelets are to be computed.
sigma: float
Smoothing to be used for canny edge detection.
Returns
-------
locations: ndarray of shape (n_edgelets, 2)
Locations of each of the edgelets.
directions: ndarray of shape (n_edgelets, 2)
Direction of the edge (tangent) at each of the edgelet.
strengths: ndarray of shape (n_edgelets,)
Length of the line segments detected for the edgelet.
"""
gray_img = color.rgb2gray(image)
edges = feature.canny(gray_img, sigma)
lines = transform.probabilistic_hough_line(edges, line_length=3,
line_gap=2)
locations = []
directions = []
strengths = []
for p0, p1 in lines:
p0, p1 = np.array(p0), np.array(p1)
locations.append((p0 + p1) / 2)
directions.append(p1 - p0)
strengths.append(np.linalg.norm(p1 - p0))
# convert to numpy arrays and normalize
locations = np.array(locations)
directions = np.array(directions)
strengths = np.array(strengths)
directions = directions / \
np.linalg.norm(directions, axis=1)[:, np.newaxis]
return (locations, directions, strengths)
def edgelet_lines(edgelets):
"""Compute lines in homogenous system for edglets.
Parameters
----------
edgelets: tuple of ndarrays
(locations, directions, strengths) as computed by `compute_edgelets`.
Returns
-------
lines: ndarray of shape (n_edgelets, 3)
Lines at each of the edgelet locations, in the homogeneous system.
"""
locations, directions, _ = edgelets
normals = np.zeros_like(directions)
normals[:, 0] = directions[:, 1]
normals[:, 1] = -directions[:, 0]
p = -np.sum(locations * normals, axis=1)
lines = np.concatenate((normals, p[:, np.newaxis]), axis=1)
return lines
def compute_votes(edgelets, model, threshold_inlier=5):
"""Compute votes for each of the edgelet against a given vanishing point.
Votes for edgelets which lie inside threshold are same as their strengths,
otherwise zero.
Parameters
----------
edgelets: tuple of ndarrays
(locations, directions, strengths) as computed by `compute_edgelets`.
model: ndarray of shape (3,)
Vanishing point model in homogeneous coordinate system.
threshold_inlier: float
Threshold to be used for computing inliers in degrees. Angle between
edgelet direction and line connecting the Vanishing point model and
edgelet location is used to threshold.
Returns
-------
votes: ndarry of shape (n_edgelets,)
Votes towards vanishing point model for each of the edgelet.
"""
vp = model[:2] / model[2]
locations, directions, strengths = edgelets
est_directions = locations - vp
dot_prod = np.sum(est_directions * directions, axis=1)
abs_prod = np.linalg.norm(directions, axis=1) * \
np.linalg.norm(est_directions, axis=1)
abs_prod[abs_prod == 0] = 1e-5
cosine_theta = dot_prod / abs_prod
theta = np.arccos(np.abs(cosine_theta))
theta_thresh = threshold_inlier * np.pi / 180
return (theta < theta_thresh) * strengths
def ransac_vanishing_point(edgelets, num_ransac_iter=2000, threshold_inlier=5):
"""Estimate vanishing point using Ransac.
Parameters
----------
edgelets: tuple of ndarrays
(locations, directions, strengths) as computed by `compute_edgelets`.
num_ransac_iter: int
Number of iterations to run ransac.
threshold_inlier: float
threshold to be used for computing inliers in degrees.
Returns
-------
best_model: ndarray of shape (3,)
Best estimated model for the vanishing point.
Reference
---------
Chaudhury, Krishnendu, <NAME>, and <NAME>.
"Auto-rectification of user photos." 2014 IEEE International Conference on
Image Processing (ICIP). IEEE, 2014.
"""
locations, directions, strengths = edgelets
lines = edgelet_lines(edgelets)
num_pts = strengths.size
arg_sort = np.argsort(-strengths)
first_index_space = arg_sort[:num_pts // 5]
second_index_space = arg_sort[:num_pts // 2]
best_model = None
best_votes = np.zeros(num_pts)
for ransac_iter in range(num_ransac_iter):
ind1 = np.random.choice(first_index_space)
ind2 = np.random.choice(second_index_space)
l1 = lines[ind1]
l2 = lines[ind2]
current_model = np.cross(l1, l2)
if np.sum(current_model**2) < 1 or current_model[2] == 0:
# reject degenerate candidates
continue
current_votes = compute_votes(
edgelets, current_model, threshold_inlier)
if current_votes.sum() > best_votes.sum():
best_model = current_model
best_votes = current_votes
logging.info("Current best model has {} votes at iteration {}".format(
current_votes.sum(), ransac_iter))
return best_model
def ransac_3_line(edgelets, focal_length, num_ransac_iter=2000,
threshold_inlier=5):
"""Estimate orthogonal vanishing points using 3 line Ransac algorithm.
Assumes camera has been calibrated and its focal length is known.
Parameters
----------
edgelets: tuple of ndarrays
(locations, directions, strengths) as computed by `compute_edgelets`.
focal_length: float
Focal length of the camera used.
num_ransac_iter: int
Number of iterations to run ransac.
threshold_inlier: float
threshold to be used for computing inliers in degrees.
Returns
-------
vp1: ndarray of shape (3,)
Estimated model for first vanishing point.
vp2: ndarray of shape (3,)
Estimated model for second vanishing point, which is orthogonal to
first vanishing point.
Reference
---------
Bazin, Jean-Charles, and <NAME>. "3-line RANSAC for orthogonal
vanishing point detection." 2012 IEEE/RSJ International Conference on
Intelligent Robots and Systems. IEEE, 2012.
"""
locations, directions, strengths = edgelets
lines = edgelet_lines(edgelets)
num_pts = strengths.size
arg_sort = np.argsort(-strengths)
first_index_space = arg_sort[:num_pts // 5]
second_index_space = arg_sort[:num_pts // 5]
third_index_space = arg_sort[:num_pts // 2]
best_model = (None, None)
best_votes = 0
for ransac_iter in range(num_ransac_iter):
ind1 = np.random.choice(first_index_space)
ind2 = np.random.choice(second_index_space)
ind3 = np.random.choice(third_index_space)
l1 = lines[ind1]
l2 = lines[ind2]
l3 = lines[ind3]
vp1 = np.cross(l1, l2)
# The vanishing line polar to v1 under the IAC diag(1/f^2, 1/f^2, 1);
# this is an elementwise scaling of vp1 (np.dot here would collapse to a scalar)
h = vp1 * np.array([1 / focal_length**2, 1 / focal_length**2, 1])
vp2 = np.cross(h, l3)
if np.sum(vp1**2) < 1 or vp1[2] == 0:
# reject degenerate candidates
continue
if np.sum(vp2**2) < 1 or vp2[2] == 0:
# reject degenerate candidates
continue
vp1_votes = compute_votes(edgelets, vp1, threshold_inlier)
vp2_votes = compute_votes(edgelets, vp2, threshold_inlier)
current_votes = (vp1_votes > 0).sum() + (vp2_votes > 0).sum()
if current_votes > best_votes:
best_model = (vp1, vp2)
best_votes = current_votes
logging.info("Current best model has {} votes at iteration {}".format(
current_votes, ransac_iter))
return best_model
def reestimate_model(model, edgelets, threshold_reestimate=5):
"""Reestimate vanishing point using inliers and least squares.
All the edgelets which are within a threshold are used to reestimate model
Parameters
----------
model: ndarray of shape (3,)
Vanishing point model in homogeneous coordinates which is to be
reestimated.
edgelets: tuple of ndarrays
(locations, directions, strengths) as computed by `compute_edgelets`.
All edgelets from which inliers will be computed.
    threshold_reestimate: float
        threshold to be used for finding inlier edgelets.
Returns
-------
    reestimated_model: ndarray of shape (3,)
        Reestimated model for vanishing point in homogeneous coordinates.
"""
locations, directions, strengths = edgelets
inliers = compute_votes(edgelets, model, threshold_reestimate) > 0
locations = locations[inliers]
directions = directions[inliers]
strengths = strengths[inliers]
lines = edgelet_lines((locations, directions, strengths))
a = lines[:, :2]
b = -lines[:, 2]
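    # Every inlier line (a_i, b_i, c_i) should pass through the vanishing
    # point (x, y, 1): a_i*x + b_i*y + c_i = 0. Stacking inliers gives the
    # overdetermined system a @ [x, y] = b, solved in the least-squares sense.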
    est_model = np.linalg.lstsq(a, b, rcond=None)[0]
return np.concatenate((est_model, [1.]))
def remove_inliers(model, edgelets, threshold_inlier=10):
"""Remove all inlier edglets of a given model.
Parameters
----------
    model: ndarray of shape (3,)
        Vanishing point model in homogeneous coordinates whose inlier
        edgelets are to be removed.
edgelets: tuple of ndarrays
(locations, directions, strengths) as computed by `compute_edgelets`.
threshold_inlier: float
threshold to be used for finding inlier edgelets.
Returns
-------
edgelets_new: tuple of ndarrays
All Edgelets except those which are inliers to model.
"""
    inliers = compute_votes(edgelets, model, threshold_inlier) > 0
locations, directions, strengths = edgelets
locations = locations[~inliers]
directions = directions[~inliers]
strengths = strengths[~inliers]
edgelets = (locations, directions, strengths)
return edgelets
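# Minimal sketch of the iterative pipeline these helpers enable: estimate
# the strongest vanishing point, strip its inlier edgelets, then estimate a
# second, independent vanishing point. Assumes `ransac_vanishing_point` is
# the name of the 2-line RANSAC function above.
def _demo_two_vanishing_points(edgelets):
    vp1 = ransac_vanishing_point(edgelets, num_ransac_iter=2000,
                                 threshold_inlier=5)
    vp1 = reestimate_model(vp1, edgelets, threshold_reestimate=5)
    remaining = remove_inliers(vp1, edgelets, threshold_inlier=10)
    vp2 = ransac_vanishing_point(remaining, num_ransac_iter=2000,
                                 threshold_inlier=5)
    vp2 = reestimate_model(vp2, remaining, threshold_reestimate=5)
    return vp1, vp2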
def compute_homography_and_warp(image, vp1, vp2, clip=True, clip_factor=3):
"""Compute homography from vanishing points and warp the image.
It is assumed that vp1 and vp2 correspond to horizontal and vertical
directions, although the order is not assumed.
    Firstly, a projective transform is computed to send the vanishing points
    to infinity so that we obtain a fronto-parallel view. Then, an affine
    transform is computed to make the axes corresponding to the vanishing
    points orthogonal. Finally, the image is translated so that no content
    is lost. Note that this image can be very large. `clip` is provided to
    deal with this.
Parameters
----------
image: ndarray
        Image which has to be warped.
    vp1: ndarray of shape (3, )
        First vanishing point in homogeneous coordinate system.
    vp2: ndarray of shape (3, )
        Second vanishing point in homogeneous coordinate system.
clip: bool, optional
If True, image is clipped to clip_factor.
clip_factor: float, optional
        Proportion of the image (in multiples of the image size) to be
        retained if the warp goes out of bounds.
Returns
-------
warped_img: ndarray
Image warped using homography as described above.
"""
# Find Projective Transform
vanishing_line = np.cross(vp1, vp2)
H = np.eye(3)
H[2] = vanishing_line / vanishing_line[2]
H = H / H[2, 2]
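    # A point x on the vanishing line satisfies vanishing_line . x = 0, so H
    # maps it to (x1, x2, 0), an ideal point; the entire vanishing line is
    # sent to the line at infinity, removing the projective distortion.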
# Find directions corresponding to vanishing points
v_post1 = np.dot(H, vp1)
v_post2 = np.dot(H, vp2)
    v_post1 = v_post1 / np.sqrt(v_post1[0]**2 + v_post1[1]**2)
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from nose.plugins.attrib import attr
from nose.tools import assert_raises, raises
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_equal, assert_almost_equal, assert_array_less
from scipy.stats import hypergeom, binom
from cryptorandom.cryptorandom import SHA256
from ..ksample import (k_sample,
one_way_anova,
bivariate_k_sample,
two_way_anova)
import permute.data as data
from permute.utils import get_prng
def test_worms_ksample():
worms = data.worms()
res = k_sample(worms.x, worms.y, stat='one-way anova', reps=1000, seed=1234)
assert_array_less(0.006, res[0])
assert_array_less(res[0], 0.02)
def test_one_way_anova():
group = np.ones(5)
x = np.array(range(5))
xbar = np.mean(x)
assert_equal(one_way_anova(x, group, xbar), 0)
group = np.array([1]*3 + [2]*2)
expected = 3*1**2 + 2*1.5**2
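    # This is the between-group sum of squares sum_g n_g * (mean_g - xbar)**2:
    # group 1 = [0, 1, 2] has n=3 and mean 1 (deviation 1 from xbar=2);
    # group 2 = [3, 4] has n=2 and mean 3.5 (deviation 1.5), so the statistic
    # is 3*1 + 2*2.25 = 7.5.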
assert_equal(one_way_anova(x, group, xbar), expected)
def test_two_way_anova():
prng = get_prng(100)
group1 = np.array([1]*5 + [2]*5)
group2 = np.array(list(range(5))*2)
x = prng.randint(1, 10, 10)
xbar = np.mean(x)
val = two_way_anova(x, group1, group2, xbar)
assert_almost_equal(val, 0.296, 3)
x = group2 + 1
xbar = 3
assert_equal(two_way_anova(x, group1, group2, xbar), 1)
def test_testosterone_ksample():
testosterone = data.testosterone()
x = np.hstack(testosterone.tolist())
group1 = np.hstack([[i]*5 for i in range(len(testosterone))])
group2 = np.array(list(range(5))*len(testosterone))
assert_equal(len(group1), 55)
assert_equal(len(group2), 55)
assert_equal(len(x), 55)
res = bivariate_k_sample(x, group1, group2, reps=5000, seed=5)
    assert_array_less(res[0], 0.0002)
import numpy as np
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
from sklearn.utils import shuffle
from numpy.linalg import matrix_rank as rank
# Single step feature selection method
class MCUVE:
def __init__(self, x, y, ncomp=1, nrep=500, testSize=0.2):
self.x = x
self.y = y
        # The number of latent components should not be larger than any dimension of the independent matrix
self.ncomp = min([ncomp, rank(x)])
self.nrep = nrep
self.testSize = testSize
self.criteria = None
self.featureIndex = None
self.featureR2 = np.full(self.x.shape[1], np.nan)
self.selFeature = None
def calcCriteria(self):
PLSCoef = np.zeros((self.nrep, self.x.shape[1]))
ss = ShuffleSplit(n_splits=self.nrep, test_size=self.testSize)
step = 0
for train, test in ss.split(self.x, self.y):
xtrain = self.x[train, :]
ytrain = self.y[train]
plsModel = PLSRegression(min([self.ncomp, rank(xtrain)]))
plsModel.fit(xtrain, ytrain)
PLSCoef[step, :] = plsModel.coef_.T
step += 1
meanCoef = np.mean(PLSCoef, axis=0)
stdCoef = np.std(PLSCoef, axis=0)
self.criteria = meanCoef / stdCoef
def evalCriteria(self, cv=3):
self.featureIndex = np.argsort(-np.abs(self.criteria))
for i in range(self.x.shape[1]):
xi = self.x[:, self.featureIndex[:i + 1]]
            if i < self.ncomp:
regModel = LinearRegression()
else:
regModel = PLSRegression(min([self.ncomp, rank(xi)]))
cvScore = cross_val_score(regModel, xi, self.y, cv=cv)
self.featureR2[i] = np.mean(cvScore)
def cutFeature(self, *args):
cuti = np.argmax(self.featureR2)
self.selFeature = self.featureIndex[:cuti+1]
if len(args) != 0:
returnx = list(args)
i = 0
for argi in args:
if argi.shape[1] == self.x.shape[1]:
returnx[i] = argi[:, self.selFeature]
i += 1
return tuple(returnx)
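# Minimal usage sketch on synthetic data, assuming nothing beyond the class
# above: y depends only on the first three columns of x, so the stability
# criterion should rank those columns near the top.
def _demo_mcuve():
    rng = np.random.RandomState(0)
    x = rng.randn(60, 20)
    y = x[:, 0] + 0.5 * x[:, 1] - 0.8 * x[:, 2] + 0.05 * rng.randn(60)
    selector = MCUVE(x, y, ncomp=3, nrep=100)
    selector.calcCriteria()
    selector.evalCriteria(cv=3)
    x_selected, = selector.cutFeature(x)
    return selector.selFeature, x_selected.shape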
class RT(MCUVE):
def calcCriteria(self):
# calculate normal pls regression coefficient
plsmodel0=PLSRegression(self.ncomp)
plsmodel0.fit(self.x, self.y)
# calculate noise reference regression coefficient
plsCoef0=plsmodel0.coef_
PLSCoef = np.zeros((self.nrep, self.x.shape[1]))
for i in range(self.nrep):
randomidx = list(range(self.x.shape[0]))
np.random.shuffle(randomidx)
ytrain = self.y[randomidx]
plsModel = PLSRegression(self.ncomp)
plsModel.fit(self.x, ytrain)
PLSCoef[i, :] = plsModel.coef_.T
        plsCoef0 = np.tile(np.reshape(plsCoef0, [1, -1]), [self.nrep, 1])
criteria = np.sum(np.abs(PLSCoef) > np.abs(plsCoef0), axis=0)/self.nrep
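        # `criteria` is an empirical permutation p-value per feature: the
        # fraction of nrep null coefficients (fitted on shuffled y) whose
        # magnitude exceeds the unpermuted coefficient. Small values mean
        # the real coefficient is unlikely under the null.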
self.criteria = criteria
def evalCriteria(self, cv=3):
        # Note: a small p-value indicates an important feature
self.featureIndex = np.argsort(self.criteria)
for i in range(self.x.shape[1]):
xi = self.x[:, self.featureIndex[:i + 1]]
            if i < self.ncomp:
regModel = LinearRegression()
else:
regModel = PLSRegression(min([self.ncomp, rank(xi)]))
cvScore = cross_val_score(regModel, xi, self.y, cv=cv)
            self.featureR2[i] = np.mean(cvScore)
import numpy as np
from .onshore_cost_model import onshore_tcc
def offshore_turbine_capex(capacity, hub_height, rotor_diam, depth, distance_to_shore, distance_to_bus=3, foundation="monopile", mooring_count=3, anchor="DEA", turbine_count=80, turbine_spacing=5, turbine_row_spacing=9):
"""
A cost and scaling model (CSM) to calculate the total cost of a 3-bladed, direct drive offshore wind turbine according to the cost model proposed by Fingersh et al. [1] and Maples et al. [2].
    The CSM distinguishes between seafloor-fixed foundation types, "monopile" and "jacket", and floating foundation types, "semisubmersible" and "spar".
    The total turbine cost includes the contributions of the turbine capital cost (TCC), amounting to 32.9% for fixed or 23.9% for floating structures, the balance-of-system (BOS) cost contribution, amounting to 46.2% and 60.8% respectively, as well as the financial costs as the complementary percentage contribution (20.9% and 15.3%, respectively) in the same manner [3].
    A CSM normalization is done such that the baseline offshore turbine chosen by Caglayan et al. [4] (see notes for details) corresponds to an expected specific cost of 2300 €/kW in a 2050 European context, as suggested by the 2016 cost of wind energy review by Stehly [3].
Parameters
----------
capacity : numeric or array-like
Turbine's nominal capacity in kW.
hub_height : numeric or array-like
Turbine's hub height in m.
rotor_diam : numeric or array-like
Turbine's rotor diameter in m.
depth : numeric or array-like
Water depth in m (absolute value) at the turbine's location.
distance_to_shore : numeric or array-like
Distance from the turbine's location to the nearest shore in km.
distance_to_bus : numeric or array-like, optional
        Distance from the turbine's location to the wind farm's bus in km.
foundation : str or array-like of strings, optional
Turbine's foundation type. Accepted types are: "monopile", "jacket", "semisubmersible" or "spar", by default "monopile"
    mooring_count : numeric, optional
        Number of mooring lines attaching a turbine to the seafloor; only applicable for floating foundation types. By default 3, assuming a triangular attachment to the seafloor.
    anchor : str, optional
        Turbine's anchor type; only applicable for floating foundation types. By default "DEA", as recommended by [1].
        Arguments accepted are "dea" (drag embedment anchor) or "spa" (suction pile anchor).
turbine_count : numeric, optional
Number of turbines in the offshore windpark. CSM valid for the range [3-200], by default 80
turbine_spacing : numeric, optional
        Spacing distance between turbines in a row (turbines that share the electrical connection to the bus). The value must be a multiplier of the rotor diameter. CSM valid for the range [4-9], by default 5
turbine_row_spacing : numeric, optional
        Spacing distance between rows of turbines. The value must be a multiplier of the rotor diameter. CSM valid for the range [4-10], by default 9
Returns
--------
numeric or array-like
Offshore turbine total cost
See also
--------
onshore_turbine_capex(capacity, hub_height, rotor_diam, base_capex, base_capacity, base_hub_height, base_rotor_diam, tcc_share, bos_share)
Notes
-------
    The baseline offshore turbine corresponds to the optimal design for Europe according to Caglayan et al. [4]: capacity = 9400 kW, hub height = 135 m, rotor diameter = 210 m, "monopile" foundation, reference water depth = 40 m, and reference distance to shore = 60 km.
Sources
-------
[1] Fingersh, L., <NAME>., & <NAME>. (2006). Wind Turbine Design Cost and Scaling Model. Nrel. https://www.nrel.gov/docs/fy07osti/40566.pdf
[2] <NAME>., <NAME>., & <NAME>. (2010). Comparative Assessment of Direct Drive High Temperature Superconducting Generators in Multi-Megawatt Class Wind Turbines. Energy. https://doi.org/10.2172/991560
[3] <NAME>., <NAME>., & <NAME>. (2016). Cost of Wind Energy Review. Technical Report. https://www.nrel.gov/docs/fy18osti/70363.pdf
[4] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). The techno-economic potential of offshore wind energy with optimized future turbine designs in Europe. Applied Energy. https://doi.org/10.1016/j.apenergy.2019.113794
[5] <NAME>., <NAME>., & <NAME>. (2017). NREL Offshore Balance-of- System Model NREL Offshore Balance-of- System Model. https://www.nrel.gov/docs/fy17osti/66874.pdf
[6] <NAME>., <NAME>., <NAME>., & <NAME>. (2014). Levelised cost of energy for offshore floating wind turbines in a life cycle perspective. Renewable Energy, 66, 714–728. https://doi.org/10.1016/j.renene.2014.01.017
[7] <NAME>., & <NAME>. (2013). Levelised Costs Of Energy For Offshore Floating Wind Turbine Concepts [Norwegian University of Life Sciences]. https://nmbu.brage.unit.no/nmbu-xmlui/bitstream/handle/11250/189073/Bjerkseter%2C C. %26 Ågotnes%2C A. %282013%29 - Levelised Costs of Energy for Offshore Floating Wind Turbine Concepts.pdf?sequence=1&isAllowed=y
[8] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016). IEA Wind Task 26: Offshore Wind Farm Baseline Documentation. https://doi.org/10.2172/1259255
[9] RPG CABLES, & KEC International limited. (n.d.). EXTRA HIGH VOLTAGE cables. RPG CABLES. www.rpgcables.com/images/product/EHV-catalogue.pdf
"""
    # TODO: Generalize this function further (like with the onshore cost model)
# PREPROCESS INPUTS
cp = np.array(capacity / 1000)
# rr = np.array(rotor_diam / 2)
rd = np.array(rotor_diam)
hh = np.array(hub_height)
depth = np.abs(np.array(depth))
distance_to_shore = np.array(distance_to_shore)
distance_to_bus = np.array(distance_to_bus)
# COMPUTE COSTS
tcc = onshore_tcc(cp=cp * 1000, hh=hh, rd=rd)
tcc *= 0.7719832742256006
bos = offshore_bos(cp=cp, rd=rd, hh=hh, depth=depth, distance_to_shore=distance_to_shore, distance_to_bus=distance_to_bus, foundation=foundation,
mooring_count=mooring_count, anchor=anchor, turbine_count=turbine_count,
turbine_spacing=turbine_spacing, turbine_row_spacing=turbine_row_spacing, )
bos *= 0.3669156255898912
if foundation == 'monopile' or foundation == 'jacket':
fin = (tcc + bos) * 20.9 / (32.9 + 46.2) # Scaled according to tcc [7]
else:
fin = (tcc + bos) * 15.6 / (60.8 + 23.6) # Scaled according to tcc [7]
return tcc + bos + fin
# return np.array([tcc,bos,fin])
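# Minimal usage sketch for the baseline turbine described in the notes
# (9400 kW, 135 m hub height, 210 m rotor, monopile at 40 m depth, 60 km
# from shore). Given the normalization described above, the specific cost
# should land near 2300 currency units per kW.
def _demo_offshore_capex():
    total_cost = offshore_turbine_capex(
        capacity=9400, hub_height=135, rotor_diam=210,
        depth=40, distance_to_shore=60, foundation="monopile")
    return total_cost / 9400  # specific cost per kW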
def offshore_bos(cp, rd, hh, depth, distance_to_shore, distance_to_bus, foundation, mooring_count, anchor, turbine_count, turbine_spacing, turbine_row_spacing):
"""
    A function to determine the balance-of-system cost (BOS) of an offshore turbine based on the capacity, hub height and rotor diameter values according to Fingersh et al. [1].
Parameters
----------
cp : numeric or array-like
Turbine's nominal capacity in kW
rd : numeric or array-like
Turbine's rotor diameter in m
hh : numeric or array-like
Turbine's hub height in m
depth : numeric or array-like
Water depth in m (absolute value) at the turbine's location.
distance_to_shore : numeric or array-like
Distance from the turbine's location to the nearest shore in km.
distance_to_bus : numeric or array-like, optional
        Distance from the turbine's location to the wind farm's bus in km.
foundation : str or array-like of strings, optional
Turbine's foundation type. Accepted types are: "monopile", "jacket", "semisubmersible" or "spar", by default "monopile"
    mooring_count : numeric, optional
        Number of mooring lines attaching a turbine to the seafloor; only applicable for floating foundation types. By default 3, assuming a triangular attachment to the seafloor.
    anchor : str
        Turbine's anchor type; only applicable for floating foundation types.
        Arguments accepted are "dea" (drag embedment anchor) or "spa" (suction pile anchor).
turbine_count : numeric, optional
Number of turbines in the offshore windpark. CSM valid for the range [3-200], by default 80
turbine_spacing : numeric, optional
        Spacing distance between turbines in a row (turbines that share the electrical connection to the bus). The value must be a multiplier of the rotor diameter. CSM valid for the range [4-9], by default 5
turbine_row_spacing : numeric, optional
        Spacing distance between rows of turbines. The value must be a multiplier of the rotor diameter. CSM valid for the range [4-10], by default 9
Returns
-------
numeric
Offshore turbine's BOS in monetary units.
Notes
------
    Assembly and installation costs could not be implemented due to the excessive number of unspecified constants considered by Smart et al. [8]. Therefore, empirical equations were derived which fit the sensitivities to the baseline plants shown in [8]. These ended up being linear equations in turbine capacity and sea depth (only for floating turbines).
Sources
---------
[1] <NAME>., <NAME>., & <NAME>. (2006). Wind Turbine Design Cost and Scaling Model. Nrel. https://www.nrel.gov/docs/fy07osti/40566.pdf
[2] <NAME>., <NAME>., & <NAME>. (2010). Comparative Assessment of Direct Drive High Temperature Superconducting Generators in Multi-Megawatt Class Wind Turbines. Energy. https://doi.org/10.2172/991560
[3] <NAME>., <NAME>., & <NAME>. (2016). Cost of Wind Energy Review. Technical Report. https://www.nrel.gov/docs/fy18osti/70363.pdf
[4] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). The techno-economic potential of offshore wind energy with optimized future turbine designs in Europe. Applied Energy. https://doi.org/10.1016/j.apenergy.2019.113794
[5] <NAME>., <NAME>., & <NAME>. (2017). NREL Offshore Balance-of- System Model NREL Offshore Balance-of- System Model. https://www.nrel.gov/docs/fy17osti/66874.pdf
[6] <NAME>., <NAME>., <NAME>., & <NAME>. (2014). Levelised cost of energy for offshore floating wind turbines in a life cycle perspective. Renewable Energy, 66, 714–728. https://doi.org/10.1016/j.renene.2014.01.017
[7] <NAME>., & <NAME>. (2013). Levelised Costs Of Energy For Offshore Floating Wind Turbine Concepts [Norwegian University of Life Sciences]
[8] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016). IEA Wind Task 26: Offshore Wind Farm Baseline Documentation. https://doi.org/10.2172/1259255
[9] RPG CABLES, & KEC International limited. (n.d.). EXTRA HIGH VOLTAGE cables. RPG CABLES. www.rpgcables.com/images/product/EHV-catalogue.pdf
"""
# rr = rd / 2
# prevent problems with negative depth values
depth = np.abs(depth)
foundation = foundation.lower()
anchor = anchor.lower()
if foundation == "monopile" or foundation == "jacket":
fixedType = True
elif foundation == "spar" or foundation == "semisubmersible":
fixedType = False
else:
raise ValueError("Please choose one of the four foundation types: monopile, jacket, spar, or semisubmersible")
# CONSTANTS AND ASSUMPTIONS (all from [1] except where noted)
    # Structure and foundation
# embedmentDepth = 30 # meters
monopileCostRate = 2250 # dollars/tonne
monopileTPCostRate = 3230 # dollars/tonne
sparSCCostRate = 3120 # dollars/tonne
sparTCCostRate = 4222 # dollars/tonne
sparBallCostRate = 100 # dollars/tonne
jacketMLCostRate = 4680 # dollars/tonne
jacketTPCostRate = 4500 # dollars/tonne
jacketPileCostRate = 2250 # dollars/tonne
semiSubmersibleSCCostRate = 3120 # dollars/tonne
semiSubmersibleTCostRate = 6250 # dollars/tonne
semiSubmersibleHPCostRate = 6250 # dollars/tonne
mooringCostRate = 721 # dollars/tonne -- 0.12m diameter is chosen since it is the median in [1]
outfittingSteelCost = 7250 # dollars/tonne
# the values of anchor cost is calculated from Table8 in [2] by assuming a euro to dollar rate of 1.35
DEA_anchorCost = 154 # dollars [2]
SPA_anchorCost = 692 # dollars [2]
# Electrical
    # current rating values are taken from the source; an approximate number is chosen from the tables in [4]
cable1CurrentRating = 400 # [4]
cable2CurrentRating = 600 # [4]
# exportCableCurrentRating = 1000 # [4]
arrayVoltage = 33
# exportCableVoltage = 220
powerFactor = 0.95
    # buriedDepth = 1  # this value is chosen from [5]; if this changes from 1, "singleStringPower1" needs to be updated
catenaryLengthFactor = 0.04
excessCableFactor = 0.1
numberOfSubStations = 1 # From the example used in [5]
arrayCableCost = 281000 * 1.35 # dollars/km (converted from EUR) [3]
externalCableCost = 443000 * 1.35 # dollars/km (converted from EUR) [3]
singleTurbineInterfaceCost = 0 # Could not find a number...
substationInterfaceCost = 0 # Could not find a number...
dynamicCableFactor = 2
    mainPowerTransformerCostRate = 12500  # dollars/MVA
highVoltageSwitchgearCost = 950000 # dollars
mediumVoltageSwitchgearCost = 500000 # dollars
shuntReactorCostRate = 35000 # dollars/MVA
dieselGeneratorBackupCost = 1000000 # dollars
workspaceCost = 2000000 # dollars
otherAncillaryCosts = 3000000 # dollars
fabricationCostRate = 14500 # dollars/tonne
topsideDesignCost = 4500000 # dollars
assemblyFactor = 1 # could not find a number...
offshoreSubstationSubstructureCostRate = 6250 # dollars/tonne
substationSubstructurePileCostRate = 2250 # dollars/tonne
interconnectVoltage = 345 # kV
# GENERAL (APEENDIX B in NREL BOS MODEL)
# hubDiam = cp / 4 + 2
# bladeLength = (rd - hubDiam) / 2
# nacelleWidth = hubDiam + 1.5
# nacelleLength = 2 * nacelleWidth
# RNAMass is rotor nacelle assembly
RNAMass = 2.082 * cp * cp + 44.59 * cp + 22.48
# towerDiam = cp / 2 + 4
# towerMass = (0.4 * np.pi * np.power(rr, 2) * hh - 1500) / 1000
# STRUCTURE AND FOUNDATION
if foundation == 'monopile':
# monopileLength = depth + embedmentDepth + 5
monopileMass = (np.power((cp * 1000), 1.5) + (np.power(hh, 3.7) / 10) + 2100 * np.power(depth, 2.25) + np.power((RNAMass * 1000), 1.13)) / 10000
monopileCost = monopileMass * monopileCostRate
        # monopile transition piece mass is called monopileTPMass
monopileTPMass = np.exp(2.77 + 1.04 * np.power(cp, 0.5) + 0.00127 * np.power(depth, 1.5))
monopileTPCost = monopileTPMass * monopileTPCostRate
foundationCost = monopileCost + monopileTPCost
mooringAndAnchorCost = 0
elif foundation == 'jacket':
        # jacket main lattice mass is called jacketMLMass
jacketMLMass = np.exp(3.71 + 0.00176 * np.power(cp, 2.5) + 0.645 * np.log(np.power(depth, 1.5)))
jacketMLCost = jacketMLMass * jacketMLCostRate
        # jacket transition piece mass is called jacketTPMass
jacketTPMass = 1 / (((-0.0131 + 0.0381) / np.log(cp)) - 0.00000000227 * np.power(depth, 3))
jacketTPCost = jacketTPMass * jacketTPCostRate
        # jacket pile mass is called jacketPileMass
jacketPileMass = 8 * np.power(jacketMLMass, 0.5574)
jacketPileCost = jacketPileMass * jacketPileCostRate
foundationCost = jacketMLCost + jacketTPCost + jacketPileCost
mooringAndAnchorCost = 0
elif foundation == 'spar':
        # spar stiffened column mass is called sparSCMass
sparSCMass = 535.93 + 17.664 * np.power(cp, 2) + 0.02328 * depth * np.log(depth)
sparSCCost = sparSCMass * sparSCCostRate
        # spar tapered column mass is called sparTCMass
sparTCMass = 125.81 * np.log(cp) + 58.712
sparTCCost = sparTCMass * sparTCCostRate
        # spar ballast mass is called sparBallMass
sparBallMass = -16.536 * np.power(cp, 2) + 1261.8 * cp - 1554.6
sparBallCost = sparBallMass * sparBallCostRate
foundationCost = sparSCCost + sparTCCost + sparBallCost
if anchor == 'dea':
anchorCost = DEA_anchorCost
# the equation is derived from [3]
mooringLength = 1.5 * depth + 350
elif anchor == 'spa':
anchorCost = SPA_anchorCost
            # since it is assumed to have an angle of 45 degrees, it is multiplied by 1.41, the square root of 2 [3]
mooringLength = 1.41 * depth
else:
raise ValueError("Please choose an anchor type!")
mooringAndAnchorCost = mooringLength * mooringCostRate + anchorCost
elif foundation == 'semisubmersible':
        # semiSubmersible stiffened column mass is called semiSubmersibleSCMass
semiSubmersibleSCMass = -0.9571 * np.power(cp, 2) + 40.89 * cp + 802.09
semiSubmersibleSCCost = semiSubmersibleSCMass * semiSubmersibleSCCostRate
        # semiSubmersible truss mass is called semiSubmersibleTMass
semiSubmersibleTMass = 2.7894 * np.power(cp, 2) + 15.591 * cp + 266.03
semiSubmersibleTCost = semiSubmersibleTMass * semiSubmersibleTCostRate
        # semiSubmersible heavy plate mass is called semiSubmersibleHPMass
semiSubmersibleHPMass = -0.4397 * np.power(cp, 2) + 21.145 * cp + 177.42
semiSubmersibleHPCost = semiSubmersibleHPMass * semiSubmersibleHPCostRate
foundationCost = semiSubmersibleSCCost + semiSubmersibleTCost + semiSubmersibleHPCost
if anchor == 'dea':
anchorCost = DEA_anchorCost
# the equation is derived from [3]
mooringLength = 1.5 * depth + 350
elif anchor == 'spa':
anchorCost = SPA_anchorCost
            # since it is assumed to have an angle of 45 degrees, it is multiplied by 1.41, the square root of 2 [3]
mooringLength = 1.41 * depth
else:
raise ValueError("Please choose an anchor type!")
mooringAndAnchorCost = mooringLength * mooringCostRate + anchorCost
if fixedType:
if cp > 4:
secondarySteelSubstructureMass = 40 + (0.8 * (18 + depth))
else:
secondarySteelSubstructureMass = 35 + (0.8 * (18 + depth))
elif foundation == 'spar':
secondarySteelSubstructureMass = np.exp(3.58 + 0.196 * np.power(cp, 0.5) * np.log(cp) + 0.00001 * depth * np.log(depth))
elif foundation == 'semisubmersible':
        secondarySteelSubstructureMass = -0.153 * np.power(cp, 2) + 6.54 * cp + 128.34
# -*- coding: utf-8 -*-
"""Core library of thermd.
Library of 1-dimensional (modelica-like) models. Core file with the API of the library.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum, auto
from pathlib import Path
from typing import List, Dict, Type, Union, Optional, Tuple, Any
# from matplotlib import pyplot as plt
import networkx as nx
import numpy as np
import pyexcel as pe
# from CoolProp.CoolProp import AbstractState
from thermd.helper import get_logger
# Initialize global logger
logger = get_logger(__name__)
# Enums
class NodeTypes(Enum):
MODEL = auto()
BLOCK = auto()
PORT = auto()
class ConnectionTypes(Enum):
INTERNAL = auto()
FLUID = auto()
SIGNAL = auto()
# THERMAL = auto()
class PortTypes(Enum):
FLUID_INLET = auto()
FLUID_OUTLET = auto()
FLUID_INLET_OUTLET = auto()
SIGNAL_INLET = auto()
SIGNAL_OUTLET = auto()
SIGNAL_INLET_OUTLET = auto()
# THERMAL_INLET = auto()
# THERMAL_OUTLET = auto()
# THERMAL_INLET_OUTLET = auto()
class StatePhases(Enum):
LIQUID = 0
SUPERCRITICAL = 1
SUPERCRITICAL_GAS = 2
SUPERCRITICAL_LIQUID = 3
CRITICAL_POINT = 4
GAS = 5
TWOPHASE = 6
UNKNOWN = 7
NOT_IMPOSED = 8
# Result classes
class BaseResultClass(ABC):
...
@dataclass
class SystemResult(BaseResultClass):
models: Optional[Dict[str, ModelResult]]
blocks: Optional[Dict[str, BlockResult]]
success: bool
status: np.int8
message: str
nit: np.int16
@classmethod
def from_success(
cls: Type[SystemResult],
models: Optional[Dict[str, ModelResult]],
blocks: Optional[Dict[str, BlockResult]],
nit: np.int16,
) -> SystemResult:
return cls(
models=models,
blocks=blocks,
success=True,
status=np.int8(0),
message="Solver finished successfully.",
nit=nit,
)
@classmethod
def from_error(
cls: Type[SystemResult],
models: Optional[Dict[str, ModelResult]],
blocks: Optional[Dict[str, BlockResult]],
nit: np.int16,
) -> SystemResult:
return cls(
models=models,
blocks=blocks,
success=False,
status=np.int8(2),
message="Solver didn't finish successfully.",
nit=nit,
)
@classmethod
def from_convergence(
cls: Type[SystemResult],
models: Optional[Dict[str, ModelResult]],
blocks: Optional[Dict[str, BlockResult]],
nit: np.int16,
) -> SystemResult:
return cls(
models=models,
blocks=blocks,
success=False,
status=np.int8(1),
message="Solver didn't converge successfully.",
nit=nit,
)
@dataclass
class ModelResult(BaseResultClass):
states: Optional[Dict[str, BaseStateClass]]
signals: Optional[Dict[str, BaseSignalClass]]
@dataclass
class BlockResult(BaseResultClass):
signals: Optional[Dict[str, BaseSignalClass]]
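# Minimal sketch of how a solver might assemble these result objects; the
# block name "pump" is illustrative only.
def _demo_system_result() -> SystemResult:
    block_results = {"pump": BlockResult(signals=None)}
    return SystemResult.from_success(models=None, blocks=block_results,
                                     nit=np.int16(12))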
# System classes
class BaseSystemClass(ABC):
"""Base class of the physical system.
The abstract base class of the physical system describes the API of every
    derived physical system. Physical systems are the main classes of the
    library; they combine all components (models, blocks, connectors, etc.)
    as well as all methods to prepare, solve and illustrate the system.
"""
def __init__(self: BaseSystemClass, **kwargs):
"""Initialize base system class.
Init function of the base system class.
"""
# System parameter
self._stop_criterion_energy = np.float64(1)
self._stop_criterion_momentum = np.float64(1)
self._stop_criterion_mass = np.float64(0.001)
        self._stop_criterion_signal = np.float64(0.001)
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
        # shouldn't error on scalar data; should instead demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
        # shouldn't error on scalar data; should instead demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
        # shouldn't error on scalar data; should instead demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
        # Levels should be (at least) shallow copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# Labels doesn't matter which way copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Names doesn't matter which way copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
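# Sketch of the invariant exercised above: from_product builds the full
# Cartesian product, so len(result) == len(first) * len(second) (3 * 3 == 9)
# and the first level varies slowest.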
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
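# Assumed internals being exercised: .values materializes an object ndarray
# of tuples, boxing datetimelike scalars as Timestamp/NaT; slicing the index
# first must produce the same boxed values as slicing the materialized array.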
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
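# The idx[:2] slices above leave more level values than labels in use
# ("n_lev > n_lab"), covering the branch where .values cannot assume every
# level entry is referenced.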
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924: when a level consists entirely of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# requesting more levels than exist should raise IndexError
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format migration
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
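# Hypothetical worked example of the position: from_product enumerates the
# product row-major, so with 2x2 levels the key ('b', 'd') maps to
# 1 * 2 + 1 == 3, which is why get_loc is expected to return 3 above.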
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994: as with a flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086: int is cast to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# smoke test only: slice_locs on the sorted index should not raise
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# keys fall between existing values, exercising searchsorted behavior
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
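# Keys like (1, 0) are not present; with sortorder=0 slice_locs falls back to
# searchsorted-style insertion points, so missing endpoints still yield a
# well-defined (start, stop) pair instead of raising.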
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that this works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
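# A 10**6-row product index stresses the hash table: any collision that
# conflated two distinct tuples would make get_indexer/get_loc disagree with
# the positional identity checked above.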
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner cases: union with self or with an empty index returns self
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# "-" now raises TypeError (previously it performed set difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_index_equal(result, expected)
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
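# sortlevel(k) sorts by level k first and the remaining levels afterwards,
# which is why the expected order for level 1 is built with the swapped key
# (x[1], x[0]) above.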
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
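# (Rough rationale, assuming the usual group-index encoding: with 8 levels of
# ~500 values the combined label space is ~500**8 > 2**63, so duplicated()
# must take a hash-based path rather than a single int64 code per row.)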
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index = MultiIndex(levels=levels, labels=labels)
else:
values = index.values.tolist()
index = MultiIndex.from_tuples(values + [values[0]])
assert index.has_duplicates
# no overflow
check(4, False)
check(4, True)
# overflow possible
check(8, False)
check(8, True)
# GH 9125
n, k = 200, 5000
levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)]
labels = [np.random.choice(n, k * n) for lev in levels]
mi = MultiIndex(levels=levels, labels=labels)
for keep in ['first', 'last', False]:
left = mi.duplicated(keep=keep)
right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep)
tm.assert_numpy_array_equal(left, right)
# GH5873
for a in [101, 102]:
mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]])
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
2, dtype='bool'))
for n in range(1, 6): # 1st level shape
for m in range(1, 5): # 2nd level shape
# all possible unique combinations, including nan
lab = product(range(-1, n), range(-1, m))
mi = MultiIndex(levels=[list('abcde')[:n], list('WXYZ')[:m]],
labels=np.random.permutation(list(lab)).T)
assert len(mi) == (n + 1) * (m + 1)
assert not mi.has_duplicates
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
assert mi.get_duplicates().equals(MultiIndex.from_arrays(
[[], []]))
tm.assert_numpy_array_equal(mi.duplicated(), np.zeros(
len(mi), dtype='bool'))
def test_duplicate_meta_data(self):
# GH 10115
index = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [index,
index.set_names([None, None]),
index.set_names([None, 'Num']),
index.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_get_unique_index(self):
idx = self.index[[0, 1, 0, 1, 1, 0, 0]]
expected = self.index._shallow_copy(idx[[0, 1]])
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
assert result.is_unique
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(self, names):
mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')],
names=mi.names)
tm.assert_index_equal(res, exp)
mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')],
names=names)
res = mi.unique()
exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
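# unique() keeps first-seen order rather than sorting, hence exp lists
# (1, 1), (2, 1), (2, 2) in encounter order in the first case above.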
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(self, level):
# GH #17896 - with level= argument
result = self.index.unique(level=level)
expected = self.index.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_unique_datetimelike(self):
idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = pd.MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
def test_tolist(self):
result = self.index.tolist()
exp = list(self.index.values)
assert result == exp
def test_repr_with_unicode_data(self):
with pd.core.config.option_context("display.encoding", 'UTF-8'):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\u" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip(self):
mi = MultiIndex.from_product([list('ab'), range(3)],
names=['first', 'second'])
str(mi)
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
mi_u = MultiIndex.from_product(
[list(u'ab'), range(3)], names=['first', 'second'])
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
# formatting
if PY3:
str(mi)
else:
compat.text_type(mi)
# long format
mi = MultiIndex.from_product([list('abcdefg'), range(10)],
names=['first', 'second'])
if PY3:
tm.assert_index_equal(eval(repr(mi)), mi, exact=True)
else:
result = eval(repr(mi))
# string coerces to unicode
tm.assert_index_equal(result, mi, exact=False)
assert mi.get_level_values('first').inferred_type == 'string'
assert result.get_level_values('first').inferred_type == 'unicode'
result = eval(repr(mi_u))
tm.assert_index_equal(result, mi_u, exact=True)
def test_str(self):
# tested elsewhere
pass
def test_unicode_string_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
if PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_isna_behavior(self):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(self.index)
def test_level_setting_resets_attributes(self):
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_is_monotonic_increasing(self):
i = MultiIndex.from_product([np.arange(10),
np.arange(10)], names=['one', 'two'])
assert i.is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex.from_product([[1.0, np.nan, 2.0], ['a', 'b', 'c']])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values).is_monotonic
assert not Index(i.values)._is_strictly_monotonic_increasing
# string ordering
i = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic
assert not Index(i.values).is_monotonic
assert not i._is_strictly_monotonic_increasing
assert not Index(i.values)._is_strictly_monotonic_increasing
i = MultiIndex(levels=[['bar', 'baz', 'foo', 'qux'],
['mom', 'next', 'zenith']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[1, 2, 3, 4], ['gb00b03mlx29', 'lu0197800237',
'nl0000289783',
'nl0000289965', 'nl0000301109']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic
assert not i._is_strictly_monotonic_increasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic
assert Index(i.values).is_monotonic
assert i._is_strictly_monotonic_increasing
assert Index(i.values)._is_strictly_monotonic_increasing
def test_is_monotonic_decreasing(self):
i = MultiIndex.from_product([np.arange(9, -1, -1),
np.arange(9, -1, -1)],
names=['one', 'two'])
assert i.is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10),
np.arange(10, 0, -1)],
names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([np.arange(10, 0, -1),
np.arange(10)], names=['one', 'two'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex.from_product([[2.0, np.nan, 1.0], ['c', 'b', 'a']])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
# string ordering
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['three', 'two', 'one']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert not i.is_monotonic_decreasing
assert not Index(i.values).is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
assert not Index(i.values)._is_strictly_monotonic_decreasing
i = MultiIndex(levels=[['qux', 'foo', 'baz', 'bar'],
['zenith', 'next', 'mom']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
# mixed levels, hits the TypeError
i = MultiIndex(
levels=[[4, 3, 2, 1], ['nl0000301109', 'nl0000289965',
'nl0000289783', 'lu0197800237',
'gb00b03mlx29']],
labels=[[0, 1, 1, 2, 2, 2, 3], [4, 2, 0, 0, 1, 3, -1]],
names=['household_id', 'asset_id'])
assert not i.is_monotonic_decreasing
assert not i._is_strictly_monotonic_decreasing
# empty
i = MultiIndex.from_arrays([[], []])
assert i.is_monotonic_decreasing
assert Index(i.values).is_monotonic_decreasing
assert i._is_strictly_monotonic_decreasing
assert Index(i.values)._is_strictly_monotonic_decreasing
def test_is_strictly_monotonic_increasing(self):
idx = pd.MultiIndex(levels=[['bar', 'baz'], ['mom', 'next']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_increasing
assert not idx._is_strictly_monotonic_increasing
def test_is_strictly_monotonic_decreasing(self):
idx = pd.MultiIndex(levels=[['baz', 'bar'], ['next', 'mom']],
labels=[[0, 0, 1, 1], [0, 0, 0, 1]])
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
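    def test_strict_vs_nonstrict_monotonic(self):
        # Added illustration (not part of the original suite): duplicate
        # entries make a MultiIndex monotonic but not *strictly* monotonic.
        idx = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1, 1, 2]])
        assert idx.is_monotonic_increasing
        assert not idx._is_strictly_monotonic_increasing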
def test_reconstruct_sort(self):
# starts off lexsorted & monotonic
mi = MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert mi.is_lexsorted()
assert mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert recons.is_lexsorted()
assert recons.is_monotonic
assert mi is recons
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = pd.MultiIndex.from_tuples([('z', 'a'), ('x', 'a'), ('y', 'b'),
('x', 'b'), ('y', 'a'), ('z', 'b')],
names=['one', 'two'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# cannot convert to lexsorted
mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=['col1', 'col2'])
assert not mi.is_lexsorted()
assert not mi.is_monotonic
recons = mi._sort_levels_monotonic()
assert not recons.is_lexsorted()
assert not recons.is_monotonic
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
def test_reconstruct_remove_unused(self):
# xref to GH 2770
df = DataFrame([['deleteMe', 1, 9],
['keepMe', 2, 9],
['keepMeToo', 3, 9]],
columns=['first', 'second', 'third'])
df2 = df.set_index(['first', 'second'], drop=False)
df2 = df2[df2['first'] != 'deleteMe']
# removed levels are there
expected = MultiIndex(levels=[['deleteMe', 'keepMe', 'keepMeToo'],
[1, 2, 3]],
labels=[[1, 2], [1, 2]],
names=['first', 'second'])
result = df2.index
tm.assert_index_equal(result, expected)
expected = MultiIndex(levels=[['keepMe', 'keepMeToo'],
[2, 3]],
labels=[[0, 1], [0, 1]],
names=['first', 'second'])
result = df2.index.remove_unused_levels()
tm.assert_index_equal(result, expected)
# idempotent
result2 = result.remove_unused_levels()
tm.assert_index_equal(result2, expected)
assert result2.is_(result)
@pytest.mark.parametrize('level0', [['a', 'd', 'b'],
['a', 'd', 'b', 'unused']])
@pytest.mark.parametrize('level1', [['w', 'x', 'y', 'z'],
['w', 'x', 'y', 'z', 'unused']])
def test_remove_unused_nan(self, level0, level1):
# GH 18417
mi = pd.MultiIndex(levels=[level0, level1],
labels=[[0, 2, -1, 1, -1], [0, 1, 2, 3, 2]])
result = mi.remove_unused_levels()
tm.assert_index_equal(result, mi)
for level in 0, 1:
assert('unused' not in result.levels[level])
@pytest.mark.parametrize('first_type,second_type', [
('int64', 'int64'),
('datetime64[D]', 'str')])
def test_remove_unused_levels_large(self, first_type, second_type):
# GH16556
# because tests should be deterministic (and this test in particular
# checks that levels are removed, which is not the case for every
# random input):
rng = np.random.RandomState(4) # seed is arbitrary value that works
size = 1 << 16
df = DataFrame(dict(
first=rng.randint(0, 1 << 13, size).astype(first_type),
second=rng.randint(0, 1 << 10, size).astype(second_type),
third=rng.rand(size)))
df = df.groupby(['first', 'second']).sum()
df = df[df.third < 0.1]
result = df.index.remove_unused_levels()
assert len(result.levels[0]) < len(df.index.levels[0])
assert len(result.levels[1]) < len(df.index.levels[1])
assert result.equals(df.index)
expected = df.reset_index().set_index(['first', 'second']).index
tm.assert_index_equal(result, expected)
def test_isin(self):
values = [('foo', 2), ('bar', 3), ('quux', 4)]
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = MultiIndex.from_arrays([[], []])
result = idx.isin(values)
assert len(result) == 0
assert result.dtype == np.bool_
@pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_not_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, False]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, False]))
@pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_pypy(self):
idx = MultiIndex.from_arrays([['foo', 'bar'], [1.0, np.nan]])
tm.assert_numpy_array_equal(idx.isin([('bar', np.nan)]),
np.array([False, True]))
tm.assert_numpy_array_equal(idx.isin([('bar', float('nan'))]),
np.array([False, True]))
def test_isin_level_kwarg(self):
idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange(
4)])
vals_0 = ['foo', 'bar', 'quux']
vals_1 = [2, 3, 10]
        expected = np.array([False, False, True, True])
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import treecorr
from test_helper import get_from_wiki, get_script_name, do_pickle, CaptureLog
from test_helper import assert_raises, timer, assert_warns
from numpy import sin, cos, tan, arcsin, arccos, arctan, arctan2, pi
@timer
def test_direct():
# If the catalogs are small enough, we can do a direct calculation to see if comes out right.
# This should exactly match the treecorr result if brute_force=True
ngal = 200
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 50.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True)
gg.process(cat1, cat2)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
for i in range(ngal):
# It's hard to do all the pairs at once with numpy operations (although maybe possible).
# But we can at least do all the pairs for each entry in cat1 at once with arrays.
rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1[i]-x2) - 1j*(y1[i]-y2)) / r
ww = w1[i] * w2
xip = ww * (g11[i] + 1j*g21[i]) * (g12 - 1j*g22)
xim = ww * (g11[i] + 1j*g21[i]) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0.
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
print('diff = ',gg.xim - true_xim.real)
print('max diff = ',np.max(np.abs(gg.xim - true_xim.real)))
print('rel diff = ',(gg.xim - true_xim.real)/true_xim.real)
# This is the one that is highly affected by the approximation from averaging the shears
# before projecting, rather than averaging each shear projected to its own connecting line.
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=3.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, atol=1.e-3)
# Check a few basic operations with a GGCorrelation object.
do_pickle(gg)
gg2 = gg.copy()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, 2*gg.npairs)
np.testing.assert_allclose(gg2.weight, 2*gg.weight)
np.testing.assert_allclose(gg2.meanr, 2*gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, 2*gg.meanlogr)
np.testing.assert_allclose(gg2.xip, 2*gg.xip)
np.testing.assert_allclose(gg2.xip_im, 2*gg.xip_im)
np.testing.assert_allclose(gg2.xim, 2*gg.xim)
np.testing.assert_allclose(gg2.xim_im, 2*gg.xim_im)
gg2.clear()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, gg.npairs)
np.testing.assert_allclose(gg2.weight, gg.weight)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
ascii_name = 'output/gg_ascii.txt'
gg.write(ascii_name, precision=16)
gg3 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg3.read(ascii_name)
np.testing.assert_allclose(gg3.npairs, gg.npairs)
np.testing.assert_allclose(gg3.weight, gg.weight)
np.testing.assert_allclose(gg3.meanr, gg.meanr)
np.testing.assert_allclose(gg3.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg3.xip, gg.xip)
np.testing.assert_allclose(gg3.xip_im, gg.xip_im)
np.testing.assert_allclose(gg3.xim, gg.xim)
np.testing.assert_allclose(gg3.xim_im, gg.xim_im)
fits_name = 'output/gg_fits.fits'
gg.write(fits_name)
gg4 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg4.read(fits_name)
np.testing.assert_allclose(gg4.npairs, gg.npairs)
np.testing.assert_allclose(gg4.weight, gg.weight)
np.testing.assert_allclose(gg4.meanr, gg.meanr)
np.testing.assert_allclose(gg4.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg4.xip, gg.xip)
np.testing.assert_allclose(gg4.xip_im, gg.xip_im)
np.testing.assert_allclose(gg4.xim, gg.xim)
np.testing.assert_allclose(gg4.xim_im, gg.xim_im)
with assert_raises(TypeError):
gg2 += config
gg4 = treecorr.GGCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg4
gg5 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg5
gg6 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2)
with assert_raises(ValueError):
gg2 += gg6
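def _demo_log_binning():
    # Added sketch (not part of the original tests): the brute-force loops in
    # test_direct assign each pair separation r to a log-spaced bin via
    # floor(log(r / min_sep) / bin_size); endpoints land in the first/last bin.
    min_sep, max_sep, nbins = 1., 50., 50
    bin_size = np.log(max_sep / min_sep) / nbins
    r = np.array([1.0, 5.0, 49.9])
    index = np.floor(np.log(r / min_sep) / bin_size).astype(int)
    assert index[0] == 0 and index[-1] == nbins - 1
    return index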
@timer
def test_direct_spherical():
# Repeat in spherical coords
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) ) + 200
z2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
ra1, dec1 = coord.CelestialCoord.xyz_to_radec(x1,y1,z1)
ra2, dec2 = coord.CelestialCoord.xyz_to_radec(x2,y2,z2)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, ra_units='rad', dec_units='rad', w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, ra_units='rad', dec_units='rad', w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 10.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', brute=True)
gg.process(cat1, cat2)
r1 = np.sqrt(x1**2 + y1**2 + z1**2)
r2 = np.sqrt(x2**2 + y2**2 + z2**2)
x1 /= r1; y1 /= r1; z1 /= r1
x2 /= r2; y2 /= r2; z2 /= r2
north_pole = coord.CelestialCoord(0*coord.radians, 90*coord.degrees)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
rad_min_sep = min_sep * coord.degrees / coord.radians
c1 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra1, dec1)]
c2 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra2, dec2)]
for i in range(ngal):
for j in range(ngal):
rsq = (x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2
r = np.sqrt(rsq)
logr = np.log(r)
index = np.floor(np.log(r/rad_min_sep) / bin_size).astype(int)
if index < 0 or index >= nbins:
continue
# Rotate shears to coordinates where line connecting is horizontal.
# Original orientation is where north is up.
theta1 = 90*coord.degrees - c1[i].angleBetween(north_pole, c2[j])
theta2 = 90*coord.degrees - c2[j].angleBetween(north_pole, c1[i])
exp2theta1 = np.cos(2*theta1) + 1j * np.sin(2*theta1)
exp2theta2 = np.cos(2*theta2) + 1j * np.sin(2*theta2)
g1 = g11[i] + 1j * g21[i]
g2 = g12[j] + 1j * g22[j]
g1 *= exp2theta1
g2 *= exp2theta2
ww = w1[i] * w2[j]
xip = ww * g1 * np.conjugate(g2)
xim = ww * g1 * g2
true_npairs[index] += 1
true_weight[index] += ww
true_xip[index] += xip
true_xim[index] += xim
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct_spherical.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', bin_slop=0, max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-3, atol=1.e-6)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-3, atol=1.e-6)
diff = np.abs(gg.xim - true_xim.real)
reldiff = diff / true_xim.real
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=2.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-3, atol=2.e-4)
@timer
def test_pairwise():
# Test the pairwise option.
ngal = 1000
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
w1 = np.ones_like(w1)
w2 = np.ones_like(w2)
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 5.
max_sep = 50.
nbins = 10
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
with assert_warns(FutureWarning):
gg.process_pairwise(cat1, cat2)
gg.finalize(cat1.varg, cat2.varg)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
rsq = (x1-x2)**2 + (y1-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1-x2) - 1j*(y1-y2)) / r
ww = w1 * w2
xip = ww * (g11 + 1j*g21) * (g12 - 1j*g22)
xim = ww * (g11 + 1j*g21) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
# If cats have names, then the logger will mention them.
# Also, test running with optional args.
cat1.name = "first"
cat2.name = "second"
with CaptureLog() as cl:
gg.logger = cl.logger
with assert_warns(FutureWarning):
gg.process_pairwise(cat1, cat2, metric='Euclidean', num_threads=2)
assert "for cats first, second" in cl.output
@timer
def test_gg():
# cf. http://adsabs.harvard.edu/abs/2002A%26A...389..729S for the basic formulae I use here.
#
# Use gamma_t(r) = gamma0 r^2/r0^2 exp(-r^2/2r0^2)
# i.e. gamma(r) = -gamma0 exp(-r^2/2r0^2) (x+iy)^2 / r0^2
#
# The Fourier transform is: gamma~(k) = -2 pi gamma0 r0^4 k^2 exp(-r0^2 k^2/2) / L^2
# P(k) = (1/2pi) <|gamma~(k)|^2> = 2 pi gamma0^2 r0^8 k^4 / L^4 exp(-r0^2 k^2)
# xi+(r) = (1/2pi) int( dk k P(k) J0(kr) )
# = pi/16 gamma0^2 (r0/L)^2 exp(-r^2/4r0^2) (r^4 - 16r^2r0^2 + 32r0^4)/r0^4
# xi-(r) = (1/2pi) int( dk k P(k) J4(kr) )
# = pi/16 gamma0^2 (r0/L)^2 exp(-r^2/4r0^2) r^4/r0^4
# Note: I'm not sure I handled the L factors correctly, but the units at the end need
# to be gamma^2, so it needs to be (r0/L)^2.
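    # Added sanity check (not part of the original test): verify numerically
    # that the complex shear gamma defined above has the stated tangential
    # profile, gamma_t = -Re[gamma * exp(-2i*phi)] = gamma0 (r/r0)^2 exp(-r^2/2r0^2).
    _g0, _r0, _x, _y = 0.05, 10., 3., 4.
    _r2 = (_x**2 + _y**2) / _r0**2
    _gamma = -_g0 * np.exp(-_r2 / 2.) * (_x + 1j * _y)**2 / _r0**2
    _phi = np.arctan2(_y, _x)
    assert np.isclose(-np.real(_gamma * np.exp(-2j * _phi)),
                      _g0 * _r2 * np.exp(-_r2 / 2.))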
gamma0 = 0.05
r0 = 10.
if __name__ == "__main__":
ngal = 1000000
L = 50.*r0 # Not infinity, so this introduces some error. Our integrals were to infinity.
tol_factor = 1
else:
ngal = 100000
L = 50.*r0
# Rather than have a single set tolerance, we tune the tolerances for the above
# __main__ setup, but scale up by a factor of 5 for the quicker run.
tol_factor = 5
rng = np.random.RandomState(8675309)
x = (rng.random_sample(ngal)-0.5) * L
y = (rng.random_sample(ngal)-0.5) * L
r2 = (x**2 + y**2)/r0**2
g1 = -gamma0 * np.exp(-r2/2.) * (x**2-y**2)/r0**2
g2 = -gamma0 * np.exp(-r2/2.) * (2.*x*y)/r0**2
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin')
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin',
verbose=1)
gg.process(cat)
# log(<R>) != <logR>, but it should be close:
print('meanlogr - log(meanr) = ',gg.meanlogr - np.log(gg.meanr))
np.testing.assert_allclose(gg.meanlogr, np.log(gg.meanr), atol=1.e-3)
r = gg.meanr
temp = np.pi/16. * gamma0**2 * (r0/L)**2 * np.exp(-0.25*r**2/r0**2)
true_xip = temp * (r**4 - 16.*r**2*r0**2 + 32.*r0**4)/r0**4
true_xim = temp * r**4/r0**4
print('gg.xip = ',gg.xip)
print('true_xip = ',true_xip)
print('ratio = ',gg.xip / true_xip)
print('diff = ',gg.xip - true_xip)
print('max diff = ',max(abs(gg.xip - true_xip)))
# It's within 10% everywhere except at the zero crossings.
np.testing.assert_allclose(gg.xip, true_xip, rtol=0.1 * tol_factor, atol=1.e-7 * tol_factor)
print('xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip_im, 0, atol=2.e-7 * tol_factor)
print('gg.xim = ',gg.xim)
print('true_xim = ',true_xim)
print('ratio = ',gg.xim / true_xim)
print('diff = ',gg.xim - true_xim)
print('max diff = ',max(abs(gg.xim - true_xim)))
np.testing.assert_allclose(gg.xim, true_xim, rtol=0.1 * tol_factor, atol=2.e-7 * tol_factor)
print('xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim_im, 0, atol=1.e-7 * tol_factor)
# Should also work as a cross-correlation with itself
gg.process(cat,cat)
np.testing.assert_allclose(gg.meanlogr, np.log(gg.meanr), atol=1.e-3)
assert max(abs(gg.xip - true_xip)) < 3.e-7 * tol_factor
assert max(abs(gg.xip_im)) < 2.e-7 * tol_factor
assert max(abs(gg.xim - true_xim)) < 3.e-7 * tol_factor
assert max(abs(gg.xim_im)) < 1.e-7 * tol_factor
# We check the accuracy of the MapSq calculation below in test_mapsq.
# Here we just check that it runs, round trips correctly through an output file,
# and gives the same answer when run through corr2.
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq()
print('mapsq = ',mapsq)
print('mxsq = ',mxsq)
mapsq_file = 'output/gg_m2.txt'
gg.writeMapSq(mapsq_file, precision=16)
data = np.genfromtxt(os.path.join('output','gg_m2.txt'), names=True)
np.testing.assert_allclose(data['Mapsq'], mapsq)
np.testing.assert_allclose(data['Mxsq'], mxsq)
# Check that we get the same result using the corr2 function:
cat.write(os.path.join('data','gg.dat'))
config = treecorr.read_config('configs/gg.yaml')
config['verbose'] = 0
config['precision'] = 8
treecorr.corr2(config)
corr2_output = np.genfromtxt(os.path.join('output','gg.out'), names=True, skip_header=1)
print('gg.xip = ',gg.xip)
print('from corr2 output = ',corr2_output['xip'])
print('ratio = ',corr2_output['xip']/gg.xip)
print('diff = ',corr2_output['xip']-gg.xip)
np.testing.assert_allclose(corr2_output['xip'], gg.xip, rtol=1.e-4)
print('gg.xim = ',gg.xim)
print('from corr2 output = ',corr2_output['xim'])
print('ratio = ',corr2_output['xim']/gg.xim)
print('diff = ',corr2_output['xim']-gg.xim)
np.testing.assert_allclose(corr2_output['xim'], gg.xim, rtol=1.e-4)
print('xip_im from corr2 output = ',corr2_output['xip_im'])
print('max err = ',max(abs(corr2_output['xip_im'])))
np.testing.assert_allclose(corr2_output['xip_im'], 0, atol=2.e-7 * tol_factor)
print('xim_im from corr2 output = ',corr2_output['xim_im'])
print('max err = ',max(abs(corr2_output['xim_im'])))
np.testing.assert_allclose(corr2_output['xim_im'], 0, atol=2.e-7 * tol_factor)
# Check m2 output
corr2_output2 = np.genfromtxt(os.path.join('output','gg_m2.out'), names=True)
print('mapsq = ',mapsq)
print('from corr2 output = ',corr2_output2['Mapsq'])
print('ratio = ',corr2_output2['Mapsq']/mapsq)
print('diff = ',corr2_output2['Mapsq']-mapsq)
np.testing.assert_allclose(corr2_output2['Mapsq'], mapsq, rtol=1.e-4)
print('mxsq = ',mxsq)
print('from corr2 output = ',corr2_output2['Mxsq'])
print('ratio = ',corr2_output2['Mxsq']/mxsq)
print('diff = ',corr2_output2['Mxsq']-mxsq)
np.testing.assert_allclose(corr2_output2['Mxsq'], mxsq, rtol=1.e-4)
# OK to have m2 output, but not gg
del config['gg_file_name']
treecorr.corr2(config)
corr2_output2 = np.genfromtxt(os.path.join('output','gg_m2.out'), names=True)
np.testing.assert_allclose(corr2_output2['Mapsq'], mapsq, rtol=1.e-4)
np.testing.assert_allclose(corr2_output2['Mxsq'], mxsq, rtol=1.e-4)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check the fits write option
out_file_name = os.path.join('output','gg_out.fits')
gg.write(out_file_name)
data = fitsio.read(out_file_name)
np.testing.assert_allclose(data['r_nom'], np.exp(gg.logr))
np.testing.assert_allclose(data['meanr'], gg.meanr)
np.testing.assert_allclose(data['meanlogr'], gg.meanlogr)
np.testing.assert_allclose(data['xip'], gg.xip)
np.testing.assert_allclose(data['xim'], gg.xim)
np.testing.assert_allclose(data['xip_im'], gg.xip_im)
np.testing.assert_allclose(data['xim_im'], gg.xim_im)
np.testing.assert_allclose(data['sigma_xip'], np.sqrt(gg.varxip))
np.testing.assert_allclose(data['sigma_xim'], np.sqrt(gg.varxim))
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['npairs'], gg.npairs)
# Check the read function
gg2 = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin')
gg2.read(out_file_name)
np.testing.assert_allclose(gg2.logr, gg.logr)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
np.testing.assert_allclose(gg2.varxip, gg.varxip)
    np.testing.assert_allclose(gg3.varxim, gg.varxim)
#
# SOFENN
# Self-Organizing Fuzzy Neural Network
#
# (sounds like soften)
#
#
# Implemented per description in
# An on-line algorithm for creating self-organizing
# fuzzy neural networks
# Leng, Prasad, McGinnity (2004)
#
#
# <NAME> - 2019
# github.com/andrewre23
#
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense
from keras.utils import to_categorical
from sklearn.metrics import mean_absolute_error
# custom Fuzzy Layers
from .layers import FuzzyLayer, NormalizedLayer, WeightedLayer, OutputLayer
class FuzzyNetwork(object):
"""
Fuzzy Network
=============
-Implemented per description in:
"An on-line algorithm for creating self-organizing
fuzzy neural networks" - Leng, Prasad, McGinnity (2004)
-Composed of 5 layers with varying "fuzzy rule" nodes
* = samples
Parameters
==========
- X_train : training input data
- shape :(train_*, features)
- X_test : testing input data
- shape: (test_*, features)
- y_train : training output data
- shape: (train_*,)
- y_test : testing output data
- shape: (test_*,)
Attributes
==========
- prob_type : str
- regression/classification problem
- classes : int
- number of output classes for classification problems
- neurons : int
- number of initial neurons
- max_neurons : int
- max number of neurons
- ifpart_thresh : float
- threshold for if-part
- ifpart_samples : float
- percent of samples needed to meet ifpart criterion
- err_delta : float
- threshold for error criterion whether new neuron to be added
- debug : boolean
- debug flag
Methods
=======
- build_model :
- build Fuzzy Network and set as model attribute
- compile_model :
- compile Fuzzy Network
- loss_function :
        - custom loss function per Leng, Prasad, McGinnity (2004)
- train_model :
- train on data
- model_predictions :
- yield model predictions without full evaluation
- model_evaluations :
- evaluate models and yield metrics
- error_criterion :
- considers generalized performance of overall network
- add neuron if error above predefined error threshold (delta)
- if_part_criterion :
- checks if current fuzzy rules cover/cluster input vector suitably
Secondary Methods
=================
- get_layer :
- return layer object from model by name
- get_layer_weights :
- get current weights from any layer in model
- get_layer_output :
- get test output from any layer in model
Protected Methods
=================
- initialize_centers :
- initialize neuron centers
- initialize_widths :
- initialize neuron weights based on parameter
"""
def __init__(self,
X_train, X_test, y_train, y_test, # data attributes
neurons=1, max_neurons=100, # neuron initialization parameters
ifpart_thresh=0.1354, ifpart_samples=0.95, # ifpart threshold and percentage of samples needed
err_delta=0.12, # error criterion
prob_type='classification', # type of problem (classification/regression)
debug=True, **kwargs):
# set debug flag
self._debug = debug
# set output problem type
if prob_type.lower() not in ['classification', 'regression']:
raise ValueError("Invalid problem type")
self.prob_type = prob_type
# set data attributes
# validate numpy arrays
for data in [X_train, X_test, y_train, y_test]:
if type(data) is not np.ndarray:
raise ValueError("Input data must be NumPy arrays")
# validate one-hot-encoded y values if classification
if self.prob_type == 'classification':
# convert to one-hot-encoding if y is one dimensional
if y_test.ndim == 1:
print('Converting y data to one-hot-encodings')
# get number of samples in training data
train_samples = y_train.shape[0]
# convert complete y vector at once then split again
y = np.concatenate([y_train, y_test])
y = to_categorical(y)
y_train = y[:train_samples]
y_test = y[train_samples:]
            # set number of classes from the width of the one-hot encoded labels
self.classes = y_test.shape[1]
# set data attributes
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
# set neuron attributes
# initial number of neurons
if type(neurons) is not int or neurons <= 0:
raise ValueError("Must enter positive integer")
self.neurons = neurons
# max number of neurons
if type(max_neurons) is not int or max_neurons < neurons:
raise ValueError("Must enter positive integer no less than number of neurons")
self.max_neurons = max_neurons
# verify non-negative parameters
non_neg_params = {'ifpart_thresh': ifpart_thresh,
'ifpart_samples': ifpart_samples,
'err_delta': err_delta}
for param, val in non_neg_params.items():
if val < 0:
raise ValueError("Entered negative parameter: {}".format(param))
# set calculation attributes
self._ifpart_thresh = ifpart_thresh
self._ifpart_samples = ifpart_samples
self._err_delta = err_delta
# define model and set model attribute
self.model = None
self.build_model(**kwargs)
def build_model(self, **kwargs):
"""
Build and initialize Model if needed
Layers
======
0 - Input Layer
input dataset
- input shape : (*, features)
1 - Radial Basis Function Layer (Fuzzy Layer)
layer to hold fuzzy rules for complex system
- input : x
shape: (*, features)
- output : phi
shape : (*, neurons)
2 - Normalized Layer
normalize each output of previous layer as
relative amount from sum of all previous outputs
- input : phi
shape : (*, neurons)
- output : psi
shape : (*, neurons)
3 - Weighted Layer
multiply bias vector (1+n_features, neurons) by
parameter vector (1+n_features,) of parameters
from each fuzzy rule
multiply each product by output of each rule's
layer from normalized layer
- inputs : [x, psi]
shape : [(*, 1+features), (*, neurons)]
- output : f
shape : (*, neurons)
4 - Output Layer
summation of incoming signals from weighted layer
- input shape : (*, neurons)
- output shape : (*,)
5 - Softmax Layer (classification)
softmax layer for classification problems
- input shape : (*, 1)
- output shape : (*, classes)
"""
if self._debug:
print('Building Fuzzy Network with {} neurons...'
.format(self.neurons))
# get shape of training data
samples, feats = self.X_train.shape
# add layers
inputs = Input(name='Inputs', shape=(feats,))
fuzz = FuzzyLayer(self.neurons)
norm = NormalizedLayer(self.neurons)
weights = WeightedLayer(self.neurons)
raw = OutputLayer()
# run through layers
phi = fuzz(inputs)
psi = norm(phi)
f = weights([inputs, psi])
raw_output = raw(f)
final_out = raw_output
# add softmax layer for classification problem
        if self.prob_type == 'classification':
            classify = Dense(self.classes,
                             name='Softmax', activation='softmax')
            classes = classify(raw_output)
            final_out = classes
# TODO: determine logic for activation w/ regression
# # extract activation from kwargs
# if 'activation' not in kwargs:
# activation = 'sigmoid'
# else:
# activation = kwargs['activation']
# preds = Activation(name='OutputActivation', activation=activation)(raw_output)
# remove name from kwargs
if 'name' in kwargs:
kwargs.pop('name')
# define model and set as model attribute
model = Model(inputs=inputs, outputs=final_out,
name='FuzzyNetwork', **kwargs)
self.model = model
if self._debug:
print('...Model successfully built!')
def compile_model(self, init_c=True, random=True, init_s=True, s_0=4.0, **kwargs):
"""
Create and compile model
- sets compiled model as self.model
Parameters
==========
init_c : bool
- run method to initialize centers or take default initializations
random : bool
- take either random samples or first samples that appear in training data
init_s : bool
- run method to initialize widths or take default initializations
s_0 : float
- value for initial centers of neurons
"""
if self._debug:
print('Compiling model...')
# default loss for classification
        if self.prob_type == 'classification':
default_loss = self.loss_function
# default loss for regression
else:
default_loss = 'mean_squared_error'
kwargs['loss'] = kwargs.get('loss', default_loss)
# default optimizer for classification
        if self.prob_type == 'classification':
default_optimizer = 'adam'
# default optimizer for regression
else:
default_optimizer = 'rmsprop'
kwargs['optimizer'] = kwargs.get('optimizer', default_optimizer)
# default metrics for classification
        if self.prob_type == 'classification':
# default for binary classification
if self.y_test.ndim == 2:
default_metrics = ['binary_accuracy']
# default for multi-class classification
else:
default_metrics = ['categorical_accuracy']
# default metrics for regression
else:
default_metrics = ['accuracy']
kwargs['metrics'] = kwargs.get('metrics', default_metrics)
# compile model and show model summary
self.model.compile(**kwargs)
# initialize fuzzy rule centers
if init_c:
self._initialize_centers(random=random)
# initialize fuzzy rule widths
if init_s:
self._initialize_widths(s_0=s_0)
# print model summary
if self._debug:
print(self.model.summary())
@staticmethod
def loss_function(y_true, y_pred):
"""
Custom loss function
        E = exp{-sum[j=1..n; 1/2 * (pred(j) - true(j))^2]}
        The implementation below returns the summed squared-error exponent;
        minimizing it is equivalent to maximizing E, since exp is monotonic.
Parameters
==========
y_true : np.array
- true values
y_pred : np.array
- predicted values
"""
return K.sum(1 / 2 * K.square(y_pred - y_true))
def train_model(self, **kwargs):
"""
Fit model on current training data
"""
if self._debug:
print('Training model...')
# set default verbose setting
default_verbose = 1
kwargs['verbose'] = kwargs.get('verbose', default_verbose)
# set default training epochs
default_epochs = 100
kwargs['epochs'] = kwargs.get('epochs', default_epochs)
# set default training epochs
default_batch_size = 32
kwargs['batch_size'] = kwargs.get('batch_size', default_batch_size)
# fit model to dataset
self.model.fit(self.X_train, self.y_train, **kwargs)
def model_predictions(self):
"""
Evaluate currently trained model and return predictions
Returns
=======
preds : np.array
- predicted values
- shape: (samples,) or (samples, classes)
"""
# get prediction values
preds = self.model.predict(self.X_test)
return preds
def model_evaluation(self):
"""
Evaluate current test dataset on model
"""
# run model evaluation
self.model.evaluate(self.X_test, self.y_test)
def error_criterion(self):
"""
Check error criterion for neuron-adding process
- considers generalization performance of model
Returns
=======
- True:
if criteron satisfied and no need to grow neuron
- False:
if criteron not met and need to add neuron
"""
# mean of absolute test difference
y_pred = self.model_predictions()
return mean_absolute_error(self.y_test, y_pred) <= self._err_delta
def if_part_criterion(self):
"""
Check if-part criterion for neuron-adding process
- considers whether current fuzzy rules suitably cover inputs
- get max of all neuron outputs (pre-normalization)
- test whether max val at or above threshold
- overall criterion met if criterion met for "ifpart_samples" % of samples
Returns
=======
- True:
if criteron satisfied and no need to widen centers
- False:
if criteron not met and need to widen neuron centers
"""
# validate value of threshold parameter
if not 0 < self._ifpart_samples <= 1.0:
raise ValueError('Percentage threshold must be between 0 and 1')
# get max val
fuzz_out = self.get_layer_output(1)
# check if max neuron output is above threshold
maxes = np.max(fuzz_out, axis=-1) >= self._ifpart_thresh
        # return True if the required fraction of samples meets the threshold
return (maxes.sum() / len(maxes)) >= self._ifpart_samples
def get_layer(self, layer=None):
"""
Get layer object based on input parameter
- exception of Input layer
Parameters
==========
layer : str or int
- layer to get weights from
- input can be layer name or index
"""
# if named parameter
if layer in [mlayer.name for mlayer in self.model.layers]:
layer_out = self.model.get_layer(layer)
# if indexed parameter
elif layer in range(len(self.model.layers)):
layer_out = self.model.layers[layer]
else:
raise ValueError('Error: layer must be layer name or index')
return layer_out
def get_layer_weights(self, layer=None):
"""
Get weights of layer based on input parameter
- exception of Input layer
Parameters
==========
layer : str or int
- layer to get weights from
- input can be layer name or index
"""
return self.get_layer(layer).get_weights()
def get_layer_output(self, layer=None):
"""
Get output of layer based on input parameter
- exception of Input layer
Parameters
==========
layer : str or int
- layer to get test output from
- input can be layer name or index
"""
# create prediction from intermediate model ending at desired layer
last_layer = self.get_layer(layer)
intermediate_model = Model(inputs=self.model.input,
outputs=last_layer.output)
return intermediate_model.predict(self.X_test)
def _initialize_centers(self, random=True):
"""
Initialize neuron center weights with samples
from X_train dataset
Parameters
==========
random: bool
- take random samples from training data or
take first n instances (n=# of neurons)
"""
if random:
# set centers as random sampled index values
samples = np.random.randint(0, len(self.X_train), self.neurons)
x_i = np.array([self.X_train[samp] for samp in samples])
else:
# take first few samples, one for each neuron
x_i = self.X_train[:self.neurons]
# reshape from (neurons, features) to (features, neurons)
c_init = x_i.T
# set weights
c, s = self.get_layer_weights(1)
start_weights = [c_init, s]
self.get_layer(1).set_weights(start_weights)
# validate weights updated as expected
final_weights = self.get_layer_weights(1)
assert np.allclose(start_weights[0], final_weights[0])
assert np.allclose(start_weights[1], final_weights[1])
def _initialize_widths(self, s_0=4.0):
"""
Initialize neuron widths
Parameters
==========
s_0 : float
- initial sigma value for all neuron centers
"""
# get current center and width weights
c, s = self.get_layer_weights(1)
# repeat s_0 value to array shaped like s
s_init = np.repeat(s_0, s.size).reshape(s.shape)
# set weights
start_weights = [c, s_init]
self.get_layer(1).set_weights(start_weights)
# validate weights updated as expected
final_weights = self.get_layer_weights(1)
        assert np.allclose(start_weights[0], final_weights[0])
        assert np.allclose(start_weights[1], final_weights[1])
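if __name__ == '__main__':
    # Minimal smoke run of the network above (an added sketch, not part of the
    # original module; depends on the custom Fuzzy layers being importable).
    # The random data is purely illustrative.
    rng = np.random.RandomState(0)
    X_tr, X_te = rng.normal(size=(80, 4)), rng.normal(size=(20, 4))
    y_tr, y_te = rng.randint(0, 2, 80), rng.randint(0, 2, 20)
    net = FuzzyNetwork(X_tr, X_te, y_tr, y_te, neurons=2, debug=False)
    net.compile_model()
    net.train_model(epochs=2, verbose=0)
    print('prediction shape:', net.model_predictions().shape)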
"""
Module for managing BRATS dataset
"""
from datasets.BrainDataset import modalities
from datasets.LabeledBrainDataset import LabeledBrainDataset
import os
import fnmatch
import numpy as np
import re
import json
NPROCS = 40
TRAIN_LOOP = "train_loop"
TRAIN = "train"
VALIDATION = "validation"
TEST = "test"
class BRATSLabeled(LabeledBrainDataset):
def __init__(self, params):
super().__init__(params)
self._no_of_classes = 4
self._train_set_x, self._train_set_y, \
self._validation_set_x, self._validation_set_y, \
self._test_set_x, self._test_set_y = self.load_float_brains(self._data_dir)
def dataset_id(self, params):
"""
This method interprets the parameters and generate an id
"""
id = 'BRATSLabeled'
id += super().dataset_id(params)
return id
# overriding
@property
def x_shape_train(self):
return self._train_set_x_shape
# overriding
@property
def x_shape_eval(self):
return self._train_set_x_shape
# overriding
def get_label(self, filename):
# label = -1
with open(self._labels_file, 'r') as json_file:
labels_dict = json.load(json_file)
label = np.nonzero(labels_dict[filename])[0].astype(np.int32)[0]
return label
# overriding
def load_file_names(self, root, data_type):
original_files = []
label_files = []
with open(self._split_file, 'r') as file:
files_to_find = json.load(file)[data_type]
for path, dirs, files in os.walk(root):
if self._modalities is not None:
reg_filter = '*_' + str(modalities[self._modalities[0]]) + '_*'
for f in fnmatch.filter(files, reg_filter):
# idx = f.find('_' + str(modalities[self._modalities[0]]))
# idx = f.find('_')
# label_file_name = f[:idx]
start_idx = f.find('Brats')
end_idx = f.find('_' + str(modalities[self._modalities[0]]))
label_file_name = f[start_idx:end_idx]
if label_file_name in files_to_find:
fullname = root + '/' + f
if self._slices is not None:
slice = re.findall('_([0-9][0-9]*)', f)
if self._slices[0] <= int(slice[0]) <= self._slices[1]:
original_files.append(fullname)
label_files.append(label_file_name)
else:
original_files.append(fullname)
label_files.append(label_file_name)
else:
for f in files:
idx = f.find('_')
label_file_name = f[:idx]
if label_file_name in files_to_find:
fullname = root + '/' + f
# idx = f.find('_' + str(modalities['T2']))
original_files.append(fullname)
label_files.append(label_file_name)
dataset_tuple = [original_files, label_files]
        return np.asarray(dataset_tuple)
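def _demo_get_label_logic():
    # Added sketch (not part of the original module): BRATSLabeled.get_label
    # maps a one-hot label list to its class index via np.nonzero. The file
    # name and labels below are hypothetical.
    labels_dict = {'Brats17_TCIA_001': [0, 0, 1, 0]}
    label = np.nonzero(labels_dict['Brats17_TCIA_001'])[0].astype(np.int32)[0]
    assert label == 2
    return label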
import logging
from collections import OrderedDict, namedtuple
import numpy as np
import pandas as pd
import scipy.stats
import pyDOE2
from doepipeline.model_utils import make_desirability_function, predict_optimum
class OptimizationResult(namedtuple(
'OptimizationResult', ['predicted_optimum', 'converged',
'tol', 'reached_limits', 'empirically_found'])):
""" `namedtuple` encapsulating results from optimization. """
class UnsupportedFactorType(Exception):
pass
class UnsupportedDesign(Exception):
pass
class DesignerError(Exception):
pass
class NumericFactor:
""" Base class for numeric factors.
Simple class which encapsulates current settings and allowed
max and min.
Can't be instantiated.
"""
def __init__(self, factor_max, factor_min, current_low=None, current_high=None):
if type(self) == NumericFactor:
raise TypeError('NumericFactor can not be instantiated. Use '
'sub-classes instead.')
self.current_low = current_low
self.current_high = current_high
self.max = factor_max
self.min = factor_min
self.screening_levels = 5
@property
def span(self):
""" Distance between current high and low. """
return self.current_high - self.current_low
@property
def center(self):
""" Mean value of current high and low. """
return (self.current_high + self.current_low) / 2.0
def __repr__(self):
return ('{}(factor_max={}, factor_min={}, current_low={}, '
'current_high={})').format(self.__class__.__name__,
self.max,
self.min,
self.current_low,
self.current_high)
class QuantitativeFactor(NumericFactor):
""" Real value factors. """
class OrdinalFactor(NumericFactor):
""" Ordinal (integer) factors.
Attributes are checked to be integers (or None/inf if allowed).
"""
def __setattr__(self, attribute, value):
""" Check values `current_low`, `current_high`, `max` and `min`.
:param str attribute: Attribute name
:param Any value: New value
"""
numeric_attributes = ('current_low', 'current_high',
'max', 'min')
if attribute in numeric_attributes:
err_msg = '{} requires an integer, not {}'.format(attribute, value)
if attribute == 'max' and value == float('inf'):
pass
elif attribute == 'min' and value == float('-inf'):
pass
elif isinstance(value, float) and not value.is_integer():
raise ValueError(err_msg)
elif isinstance(value, (float, int)):
value = int(value)
elif attribute in ('current_low', 'current_high') and value is None:
pass
else:
raise ValueError(err_msg)
super(OrdinalFactor, self).__setattr__(attribute, value)
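def _demo_numeric_factors():
    # Added sketch (not part of the original module): `span` and `center` are
    # derived from the current settings, and OrdinalFactor.__setattr__ coerces
    # integral floats to int while rejecting non-integral values.
    q = QuantitativeFactor(factor_max=100.0, factor_min=0.0,
                           current_low=20.0, current_high=60.0)
    assert q.span == 40.0 and q.center == 40.0
    o = OrdinalFactor(factor_max=10, factor_min=0, current_low=2, current_high=8)
    o.current_high = 9.0  # integral float is accepted and stored as int
    assert o.current_high == 9 and isinstance(o.current_high, int)
    try:
        o.current_low = 2.5  # non-integral float raises ValueError
    except ValueError:
        pass
    return q, o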
class CategoricalFactor:
""" Multilevel categorical factors. """
def __init__(self, values, fixed_value=None):
self.values = values
self.fixed_value = fixed_value
def __repr__(self):
return '{}(values={}, fixed_value={})'.format(self.__class__.__name__,
self.values,
self.fixed_value)
class ExperimentDesigner:
_matrix_designers = {
'fullfactorial2levels': pyDOE2.ff2n,
'fullfactorial3levels': lambda n: pyDOE2.fullfact([3] * n),
'placketburman': pyDOE2.pbdesign,
'boxbehnken': lambda n: pyDOE2.bbdesign(n, 1),
'ccc': lambda n: pyDOE2.ccdesign(n, (0, 3), face='ccc'),
'ccf': lambda n: pyDOE2.ccdesign(n, (0, 3), face='ccf'),
'cci': lambda n: pyDOE2.ccdesign(n, (0, 3), face='cci'),
}
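    # Added illustration (not part of the original module): each designer maps
    # a factor count to a coded design matrix, e.g.
    #   ExperimentDesigner._matrix_designers['fullfactorial2levels'](3)
    # yields the 8 x 3 matrix of -1/+1 settings of a two-level full factorial.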
def __init__(self, factors, design_type, responses, skip_screening=True,
at_edges='distort', relative_step=.25, gsd_reduction='auto',
model_selection='brute', n_folds='loo', manual_formula=None,
shrinkage=1.0, q2_limit=0.5, gsd_span_ratio=0.5):
try:
assert at_edges in ('distort', 'shrink'),\
'unknown action at_edges: {0}'.format(at_edges)
assert relative_step is None or 0 < relative_step < 1,\
'relative_step must be float between 0 and 1 not {}'.format(relative_step)
assert model_selection in ('brute', 'greedy', 'manual'), \
'model_selection must be "brute", "greedy", "manual".'
assert n_folds == 'loo' or (isinstance(n_folds, int) and n_folds > 0), \
'n_folds must be "loo" or positive integer'
assert 0.9 <= shrinkage <= 1, 'shrinkage must be float between 0.9 and 1.0, not {}'.format(shrinkage)
assert 0 <= q2_limit <= 1, 'q2_limit must be float between 0 and 1, not {}'.format(q2_limit)
if model_selection == 'manual':
assert isinstance(manual_formula, str), \
'If model_selection is "manual" formula must be provided.'
except AssertionError as e:
raise ValueError(str(e))
self.factors = OrderedDict()
factor_types = list()
for factor_name, f_spec in factors.items():
factor = factor_from_spec(f_spec)
if isinstance(factor, CategoricalFactor) and skip_screening:
raise DesignerError('Can\'t perform optimization with categorical '
'variables without prior screening.')
self.factors[factor_name] = factor
logging.debug('Sets factor {}: {}'.format(factor_name, factor))
factor_types.append(f_spec.get('type', 'continuous'))
self.skip_screening = skip_screening
self.step_length = relative_step
self.design_type = design_type
self.responses = responses
self.response_values = None
self.gsd_reduction = gsd_reduction
self.model_selection = model_selection
self.n_folds = n_folds
self.shrinkage = shrinkage
self.q2_limit = q2_limit
self._formula = manual_formula
self._edge_action = at_edges
self._allowed_phases = ['optimization', 'screening']
self._phase = 'optimization' if self.skip_screening else 'screening'
self._n_screening_evaluations = 0
self._factor_types = factor_types
self._gsd_span_ratio = gsd_span_ratio
self._stored_transform = lambda x: x
self._best_experiment = {
'optimal_x': pd.Series([]),
'optimal_y': None,
'weighted_y': None}
n = len(self.factors)
try:
self._matrix_designers[self.design_type.lower()]
except KeyError:
raise UnsupportedDesign(self.design_type)
if len(self.responses) > 1:
self._desirabilites = {name: make_desirability_function(factor)
for name, factor in self.responses.items()}
else:
self._desirabilites = None
def new_design(self):
"""
:return: Experimental design-sheet.
:rtype: pandas.DataFrame
"""
if self._phase == 'screening':
return self._new_screening_design(reduction=self.gsd_reduction)
else:
return self._new_optimization_design()
def write_factor_csv(self, out_file):
factors = list()
idx = pd.Index(['fixed_value', 'current_low', 'current_high'])
for name, factor in self.factors.items():
current_min = None
current_high = None
fixed_value = None
if issubclass(type(factor), NumericFactor):
current_min = factor.current_low
current_high = factor.current_high
elif isinstance(factor, CategoricalFactor):
fixed_value = factor.fixed_value
else:
raise NotImplementedError
data = [fixed_value, current_min, current_high]
factors.append(pd.Series(data, index=idx, name=name))
factors_df = pd.DataFrame(factors)
logging.info('Saving factor settings to {}'.format(out_file))
factors_df.to_csv(out_file)
def update_factors_from_csv(self, csv_file):
        factors_df = pd.read_csv(csv_file, index_col=0)
logging.info('Reading factor settings from {}'.format(csv_file))
for name, factor in self.factors.items():
logging.info('Updating factor {}'.format(name))
if issubclass(type(factor), NumericFactor):
current_low = factors_df.loc[name]['current_low']
current_high = factors_df.loc[name]['current_high']
logging.info('Factor: {}. Setting current_low to {}'.format(name, current_low))
logging.info('Factor: {}. Setting current_high to {}'.format(name, current_high))
factor.current_low = current_low
factor.current_high = current_high
elif isinstance(factor, CategoricalFactor):
if pd.isnull(factors_df.loc[name]['fixed_value']):
fixed_value = None
logging.info('Factor: {}. Had no fixed_value.'.format(name))
else:
fixed_value = factors_df.loc[name]['fixed_value']
logging.info('Factor: {}. Setting fixed_value to {}.'.format(name, fixed_value))
factor.fixed_value = fixed_value
def get_optimal_settings(self, response):
"""
Calculate optimal factor settings given response. Returns calculated
optimum.
If the current phase is 'screening': returns the factor settings of
the best run and updates the current factor settings.
If the current phase is 'optimization': returns the factor settings of
the predicted optimum, but doesn't update current factor settings in
case a validation step is to be run first
:param pandas.DataFrame response: Response sheet.
:returns: Calculated optimum.
:rtype: OptimizationResult
"""
self._response_values = response.copy()
response = response.copy()
# Perform any transformations or weigh together multiple responses:
treated_response, criterion = self.treat_response(response)
if self._phase == 'screening':
# Find the best screening result and update factors accordingly
self._screening_response = treated_response
self._screening_criterion = criterion
return self._evaluate_screening(treated_response, criterion,
self._gsd_span_ratio)
else:
# Predict optimal parameter settings, but don't update factors
return self._predict_optimum_settings(treated_response, criterion)
def _update_best_experiment(self, result):
update = False
if self._best_experiment['optimal_x'].empty:
update = True
elif result['criterion'] == 'maximize':
if result['weighted_response'] > self._best_experiment['weighted_y']:
update = True
elif result['criterion'] == 'minimize':
if result['weighted_response'] < self._best_experiment['weighted_y']:
update = True
if update:
self._best_experiment['optimal_x'] = result['factor_settings']
self._best_experiment['optimal_y'] = result['response']
self._best_experiment['weighted_y'] = result['weighted_response']
return update
def get_best_experiment(self, experimental_sheet, response_sheet, use_index=1):
"""
Accepts an experimental design and the corresponding response values.
Finds the best experiment and updates self._best_experiment.
Returns the best experiment, to be used in fnc update_factors_from_optimum
"""
assert isinstance(experimental_sheet, pd.core.frame.DataFrame), \
'The input experimental sheet must be a pandas DataFrame'
assert isinstance(response_sheet, pd.core.frame.DataFrame), \
'The input response sheet must be a pandas DataFrame'
assert sorted(experimental_sheet.columns) == sorted(self.factors), \
'The factors of the experimental sheet must match those in the \
pipeline. You input:\n{}\nThey should be:\n{}'.format(
list(experimental_sheet.columns),
list(self.factors.keys()))
assert sorted(response_sheet.columns) == sorted(self.responses), \
'The responses of the response sheet must match those in the \
pipeline. You input:\n{}\nThey should be:\n{}'.format(
list(response_sheet.columns),
list(self.responses.keys()))
response = response_sheet.copy()
treated_response, criterion = self.treat_response(
response, perform_transform=False)
treated_response = treated_response.iloc[:, 0]
if criterion == 'maximize':
optimum_i = treated_response.argsort().iloc[-use_index]
elif criterion == 'minimize':
optimum_i = treated_response.argsort().iloc[use_index - 1]
else:
raise NotImplementedError
optimum_settings = experimental_sheet.iloc[optimum_i]
results = OrderedDict()
optimal_weighted_response = np.array(treated_response.iloc[optimum_i])
optimal_response = response_sheet.iloc[optimum_i]
results['factor_settings'] = optimum_settings
results['weighted_response'] = optimal_weighted_response
results['response'] = optimal_response
results['criterion'] = criterion
results['new_best'] = False
results['old_best'] = self._best_experiment
has_multiple_responses = response_sheet.shape[1] > 1
logging.debug('The best response was found in experiment:\n{}'.format(optimum_settings.name))
logging.debug('The response values were:\n{}'.format(response_sheet.iloc[optimum_i]))
if has_multiple_responses:
logging.debug('The weighed response was:\n{}'.format(treated_response.iloc[optimum_i]))
logging.debug('Will return optimum settings:\n{}'.format(results['factor_settings']))
logging.debug('And best response:\n{}'.format(results['response']))
if self._update_best_experiment(results):
results['new_best'] = True
return results
def update_factors_from_optimum(self, optimal_experiment, tol=0.25, recovery=False):
"""
Updates the factor settings based on how far the current settings are
from those supplied in optimal_experiment['factor_settings'].
:param OrderedDict optimal_experiment: Output from get_best_experiment
:param float tol: Accepted relative distance to design space edge.
:returns: Calculated optimum.
:rtype: OptimizationResult
"""
        are_numeric = np.array(self._factor_types) != 'categorical'
# coding: utf-8
"""
By <EMAIL> at 2018-08-19 22:10:16
"""
import os
import torch
import numpy as np
def default_collate_fn(batch_data):
assert isinstance(batch_data, list) and len(batch_data) > 0
if isinstance(batch_data[0], dict):
new_batch_data = {}
for k in batch_data[0].keys():
new_batch_data[k] = []
for item in batch_data:
for k in new_batch_data.keys():
new_batch_data[k].append(item[k])
return new_batch_data
else:
        return list(zip(*batch_data))
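def _demo_default_collate():
    # Added sketch (not part of the original module): a list of per-sample
    # dicts is transposed into a dict of per-key lists.
    batch = [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]
    assert default_collate_fn(batch) == {'x': [1, 3], 'y': [2, 4]}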
class LookAheadBatchStream:
def __init__(self, dataset, subset=None, batch_size=1, shuffle=False,
auto_refresh=True, collate_fn=default_collate_fn,
look_ahead=0, la_pad='none', la_skip=1):
self.dataset = dataset
self.subset = (subset if subset is not None
else np.arange(len(self.dataset), dtype="int32"))
self.ds_size = len(self.subset)
# batching strategy
self.batch_size = batch_size
self.look_ahead = look_ahead
self.la_pad = la_pad
self.la_skip = la_skip
self.shuffle = shuffle
self.batches = None
self.num_batches = None
self.auto_refresh = auto_refresh
# history statistics
self.inst_count = 0
self.batch_count = 0
# current status
self._curr_batch_idx = 0
self._curr_num_insts = None
# behavior
self.collate_fn = collate_fn
if self.auto_refresh:
self.refresh()
def curr_batch_idx(self):
if self._curr_batch_idx is None:
raise RuntimeError("no batch read.")
else:
return self._curr_batch_idx
def curr_batch(self):
return self.batches[self._curr_batch_idx]
def curr_num_insts(self):
if self._curr_num_insts is None:
raise RuntimeError("no batch read.")
else:
return self._curr_num_insts
def __iter__(self):
return self
def __next__(self):
return self.next()
def __len__(self):
size = self.ds_size // self.batch_size
return size + 1 if size * self.batch_size < self.ds_size else size
def next(self):
if (self._curr_batch_idx is not None
and self._curr_batch_idx + 1 >= self.num_batches):
if self.auto_refresh:
self.refresh()
raise StopIteration()
data, look_ahead_data = self._get_data()
return data, look_ahead_data
def _get_data(self):
self._curr_batch_idx = 0 if self._curr_batch_idx is None else self._curr_batch_idx + 1
self._curr_num_insts = len(self.batches[self._curr_batch_idx])
self.inst_count += self._curr_num_insts
self.batch_count += 1
# TODO here can be parallel!
data = [self.dataset[idx] for idx in self.batches[self._curr_batch_idx]]
look_ahead_data = []
# for lkh in range(min(self.look_ahead, len(self) - self._curr_batch_idx - 1)):
for lkh in range(self.look_ahead):
lkh_batch_idx = self._curr_batch_idx + (lkh + 1) * self.la_skip
if lkh_batch_idx >= len(self):
if self.la_pad == 'none': break
elif self.la_pad == 'cycle':
lkh_batch_idx = lkh_batch_idx % len(self)
elif self.la_pad == 'last':
lkh_data = data if len(look_ahead_data) == 0 else look_ahead_data[-1]
look_ahead_data.append(lkh_data)
else:
raise Exception()
lkh_data = [self.dataset[idx] for idx in self.batches[lkh_batch_idx]]
look_ahead_data.append(lkh_data)
if self.collate_fn is not None:
data = self.collate_fn(data)
look_ahead_data = [self.collate_fn(b) for b in look_ahead_data]
return data, look_ahead_data
def refresh(self):
if self.shuffle:
np.random.shuffle(self.subset)
self.batches = []
batch_start = 0
for i in range(self.ds_size // self.batch_size):
self.batches.append(self.subset[
batch_start:batch_start+self.batch_size])
batch_start += self.batch_size
if batch_start != self.ds_size:
self.batches.append(self.subset[batch_start:])
# update batch indicators
self.num_batches = len(self.batches)
self._curr_batch_idx = None
self._curr_num_insts = None
def state_dict(self):
"""
        Warning! Side effect: np_randomstate is the global NumPy random
        state, so restoring it will influence other portions of the program.
"""
state = {
"subset": self.subset,
"batch_size" : self.batch_size,
"shuffle" : self.shuffle,
"batches" : self.batches,
"num_batches" : self.num_batches,
"auto_refresh" : self.auto_refresh,
"inst_count" : self.inst_count,
"batch_count" : self.batch_count,
"_curr_batch_idx" : self._curr_batch_idx,
"_curr_num_insts" : self._curr_num_insts,
"np_randomstate" : | np.random.get_state() | numpy.random.get_state |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
from operator import attrgetter
def multiretrieve(info_list, iterable):
""" Retrieve multiple pieces of information at different levels of iterable object.
"""
info = list(map(lambda s: attrgetter(*info_list)(s), iterable))
info = np.asarray(info).T
info_dict = {name: value for name, value in zip(info_list, info)}
return info_dict
def multidict_merge(composites):
""" Merge multiple dictionaries with the same keys.
"""
if len(composites) in [0, 1]:
return composites
else:
# Initialize an empty dictionary with only keys
merged = dict.fromkeys(list(composites[0].keys()), [])
for k in merged.keys():
merged[k] = list(d[k] for d in composites)
return merged
def pointset_order(pset, center=None, direction='ccw', ret='order'):
"""
Order a point set around a center in a clockwise or counterclockwise way.
:Parameters:
pset : 2D array
Pixel coordinates of the point set.
center : list/tuple/1D array | None
Pixel coordinates of the putative shape center.
direction : str | 'ccw'
        Direction of the ordering ('cw' or 'ccw').
    ret : str | 'order'
        Return format: 'order' (sorting indices), 'points' (sorted
        coordinates), or 'all' (both).
:Return:
pset_ordered : 2D array
Sorted pixel coordinates of the point set.
"""
dirdict = {'cw':1, 'ccw':-1}
    # Calculate the angle of each point relative to the center (or centroid)
if center is None:
pmean = np.mean(pset, axis=0)
pshifted = pset - pmean
else:
pshifted = pset - center
pangle = np.arctan2(pshifted[:, 1], pshifted[:, 0]) * 180/np.pi
# Sorting order
order = np.argsort(pangle)[::dirdict[direction]]
pset_ordered = pset[order]
if ret == 'order':
return order
elif ret == 'points':
return pset_ordered
elif ret == 'all':
return order, pset_ordered
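# Quick demonstration of pointset_order: the corners of a unit square come
# back in a consistent angular order around their centroid.
if __name__ == '__main__':
    square = np.array([[0, 0], [1, 1], [1, 0], [0, 1]])
    print(pointset_order(square, ret='points'))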
def csm(coords, model):
""" Continuous symmetry measure for ordered polyhedral vertices.
"""
    numerator = np.sum(np.linalg.norm(coords - model, axis=1))
'''Train CIFAR10 with PyTorch.'''
import sys
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import SubsetRandomSampler,Dataset
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import argparse
from models import *
from utils import get_indices, save_output_from_dict, conv_helper, count_parameters, to_json, get_dataset
from time import time, strftime, localtime
from statistics import pstdev
import random
import numpy as np
from laplace import Laplace
from laplace.curvature import AsdlEF
from torch.nn.utils import parameters_to_vector, vector_to_parameters
import tqdm
def train(loader):
#print('\nEpoch: %d' % epoch)
net.train()
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
running_loss += loss.item()
# if args.not_CML:
# progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
# % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
if args.debug:
break
train_loss = running_loss/(batch_idx+1)
print(epoch, 'loss: %.4f' %train_loss)
return train_loss
def test(loader):
net.eval()
test_loss = 0
correct = 0
total = 0
time0 = time()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
if args.not_CML:
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
if args.debug:
break
net.train()
return 100.*correct/total, ((time() - time0)/60)
def train_acc(loader):
net.eval()
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
if args.debug:
break
net.train()
return 100.*correct/total
def get_bma_acc(net, la, loader, n_samples, hessian_structure, temp=1.0):
device = parameters_to_vector(net.parameters()).device
samples = torch.randn(n_samples, la.n_params, device=device)
if hessian_structure == "kron":
samples = la.posterior_precision.bmm(samples, exponent=-0.5)
params = la.mean.reshape(1, la.n_params) + samples.reshape(n_samples, la.n_params) * temp
elif hessian_structure == "diag":
samples = samples * la.posterior_scale.reshape(1, la.n_params) * temp
params = la.mean.reshape(1, la.n_params) + samples
else:
        raise ValueError("unknown hessian_structure: {}".format(hessian_structure))
all_probs = []
for sample_params in params:
sample_probs = []
all_ys = []
with torch.no_grad():
vector_to_parameters(sample_params, net.parameters())
net.eval()
for x, y in loader:
logits = net(x.cuda()).detach().cpu()
probs = torch.nn.functional.softmax(logits, dim=-1)
sample_probs.append(probs.detach().cpu().numpy())
all_ys.append(y.detach().cpu().numpy())
sample_probs = np.concatenate(sample_probs, axis=0)
all_ys = np.concatenate(all_ys, axis=0)
all_probs.append(sample_probs)
all_probs = np.stack(all_probs)
bma_probs = np.mean(all_probs, 0)
bma_accuracy = (np.argmax(bma_probs, axis=-1) == all_ys).mean() * 100
return bma_accuracy, bma_probs, all_ys
def get_cmll(bma_probs, all_ys, eps=1e-4):
    log_lik = 0
for i, label in enumerate(all_ys):
probs_i = bma_probs[i]
probs_i += eps
probs_i[np.argmax(probs_i)] -= eps * len(probs_i)
log_lik += np.log(probs_i[label]).item()
cmll = log_lik/len(all_ys)
return cmll
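# Cheap sanity check for get_cmll (runs at import time): two samples that put
# probability 0.8 on the true class average to roughly log(0.8).
_probs = np.array([[0.8, 0.2], [0.8, 0.2]])
_ys = np.array([0, 0])
assert abs(get_cmll(_probs, _ys) - np.log(0.8)) < 1e-2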
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--model', default='LeNet', type=str, help='name of architecture')
parser.add_argument('--data_dir', default='~/data', type=str, help='data directory')
parser.add_argument('--save_path', default='./tables/', type=str, help='path for saving tables')
parser.add_argument('--runs_save_name', default='runs.csv', type=str, help='name of runs file')
parser.add_argument('--save_name', default='table.csv', type=str, help='name of experiment (>= 1 run) file')
# this was substituted with "trainsize", the actual size of the training set that the model should train on
# parser.add_argument('--num_per_class', default=-1, type=int, help='number of training samples per class')
parser.add_argument('--debug', action='store_true', help='debug mode')
parser.add_argument('--not_CML', action='store_true', help='debug mode')
parser.add_argument('--width_multiplier', default=1, type=float, help='width multiplier of ResNets or ResNetFlexes')
parser.add_argument('--width', default=200, type=int, help='width of MLP')
parser.add_argument('--depth', default=3, type=int, help='depth of MLP or depth of ResNetFlex')
parser.add_argument('--conv_width', default=32, type=int, help='width parameter for convnet')
parser.add_argument('--conv_depth', default=0, type=int, help='depth parameter for convnet')
parser.add_argument('--num_filters', nargs='+', type=int, help='number of filters per layer for CNN')
parser.add_argument('--num_classes', default=10, type=int, help='number of classes in classification')
parser.add_argument('--seed', default=None, type=int, help='seed for subset')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--epochs', default=600, type=int, help='num epochs to train')
parser.add_argument('--test_freq', default=5, type=int, help='frequency which to test')
parser.add_argument('--num_runs', default=1, type=int, help='num runs to avg over')
parser.add_argument('--save_model', action='store_true', help='save model state_dict()')
parser.add_argument('--batch_size', default=128, type=int, help='batch_size')
parser.add_argument("--save_json", action="store_true", help="save json?")
parser.add_argument('--dataset', default='CIFAR10', type=str, help='name of dataset')
parser.add_argument('--run_label', default='runlbl', type=str, help='label of run')
parser.add_argument('--decay', default=5e-4, type=float, help='weight decay for the optimizer')
parser.add_argument('--sample_batch_size', default=16, type=int, help='batch size for the validation set -- involves sampling from LA')
parser.add_argument('--trainsize', default=250, type=int, help='weight decay for the optimizer')
parser.add_argument('--hessian_structure', default='diag', type=str, help='structure of the hessian')
parser.add_argument('--bma_nsamples', default=20, type=int, help='whether or not to use bma to get CMLL')
parser.add_argument('--init_temp', default=1e-3, type=float, help='intial temperature for rescaling la covariance')
    parser.add_argument('--run_id', default=0, type=int, help='id of the run')
args = parser.parse_args()
print(args)
if args.not_CML:
from progress_bar import progress_bar
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('==> Preparing data..')
trainset, testset = get_dataset(args.data_dir, args.dataset, args.num_classes)
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, num_workers=2, shuffle=False)
num_good_runs = 0
global_train_accuracy = 0
test_acc_list = []
global_test_acc = 0
bma_test_acc_list = []
bma_test_ll_list = []
cmll_list = []
mll_list = []
data_directory = "./data"
# making data directory if it doesn't exist
if not os.path.exists(data_directory):
os.makedirs(data_directory)
path = data_directory + "/cifar10_learningcurves_subsets.npz"
if not os.path.exists(path):
n = len(trainset.data)
idx = np.arange(n)
np.random.shuffle(idx)
n1, n2 = int(n * 0.9), int(n * 0.1)
subset_1 = idx[:n1]
subset_2 = idx[n1:n1+n2]
x, y = trainset.data, np.array(trainset.targets)
# training data
x1, y1 = x[subset_1], y[subset_1]
# data used to tune the laplace covariance matrix scale
x2, y2 = x[subset_2], y[subset_2]
# saving the data
np.savez(path,
x1=x1,
y1=y1,
x2=x2,
y2=y2)
subsets_data = np.load(path)
# getting the train and validation loaders
trainset.data = subsets_data["x1"]
trainset.targets = subsets_data["y1"].tolist()
validset, _ = get_dataset(args.data_dir, args.dataset, args.num_classes)
validset.data = subsets_data["x2"]
validset.targets = subsets_data["y2"].tolist()
validloader = torch.utils.data.DataLoader(validset, batch_size=args.sample_batch_size, shuffle=False, num_workers=2)
for run in range(args.num_runs):
# make the full trainset then subset that to two smaller sets
# making a trainset with the size we want
path = data_directory + "/cifar10_subset_train_run_size_{}_{}.npz".format(args.trainsize, args.run_id)
if not os.path.exists(path):
n = len(trainset.data)
idx = np.arange(n)
np.random.shuffle(idx)
subset_1 = idx[:args.trainsize]
x, y = trainset.data, np.array(trainset.targets)
# training data
x1, y1 = x[subset_1], y[subset_1]
np.savez(path,
x1=x1,
y1=y1)
subsets_data = np.load(path)
# creating the new train and new train-test
trainset.data = subsets_data["x1"]
trainset.targets = subsets_data["y1"].tolist()
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, num_workers=2, shuffle=True)
# now subset that to two loaders
# making a subset for the training and test for CMLL
path = data_directory + "/cifar10_subsets_icmll_run_size_{}_{}.npz".format(args.trainsize, args.run_id)
if not os.path.exists(path):
n = len(trainset.data)
idx = np.arange(n)
np.random.shuffle(idx)
n1, n2 = int(n * 0.8), int(n * 0.2)
subset_1 = idx[:n1]
subset_2 = idx[n1:n1+n2]
x, y = trainset.data, np.array(trainset.targets)
# training data
x1, y1 = x[subset_1], y[subset_1]
# data used to tune the laplace covariance matrix scale
x2, y2 = x[subset_2], y[subset_2]
np.savez(path,
x1=x1,
y1=y1,
x2=x2,
y2=y2)
subsets_data = np.load(path)
# creating the new train and new train-test
train_cmll, _ = get_dataset(args.data_dir, args.dataset, args.num_classes)
train_cmll.data = subsets_data["x1"]
train_cmll.targets = subsets_data["y1"].tolist()
test_cmll, _ = get_dataset(args.data_dir, args.dataset, args.num_classes)
test_cmll.data = subsets_data["x2"]
test_cmll.targets = subsets_data["y2"].tolist()
# dataset to compute the CMLL on
test_cmllloader = torch.utils.data.DataLoader(test_cmll, batch_size=args.sample_batch_size, shuffle=False, num_workers=2)
# dataset to train the model on
train_cmllloader = torch.utils.data.DataLoader(
train_cmll, batch_size=args.batch_size, num_workers=2, shuffle=True)
print("this is the size of the full train loader {}, \
CMLL train loader: {}, temp valid train {}, CMLL test train {}".format(len(trainloader.dataset),
len(train_cmllloader.dataset),
len(validloader.dataset),
len(test_cmllloader.dataset)))
# Model
print('==> Building model for CMLL training..')
if args.model == 'VGG19':
net = VGG('VGG19', num_classes=args.num_classes)
elif args.model == 'VGG11':
net = VGG('VGG11', num_classes=args.num_classes)
elif args.model == 'VGG13':
net = VGG('VGG13', num_classes=args.num_classes)
elif args.model == 'VGG16':
net = VGG('VGG16', num_classes=args.num_classes)
elif args.model == 'ResNet6':
net = ResNet6(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet8':
net = ResNet8(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet10':
net = ResNet10(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet12':
net = ResNet12(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet14':
net = ResNet14(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet16':
net = ResNet16(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet18':
net = ResNet18(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex':
net = ResNetFlex(num_classes=args.num_classes, width_multiplier=args.width_multiplier, depth=args.depth)
elif args.model == 'ResNetFlex34':
net = ResNetFlex34(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex50':
net = ResNetFlex50(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex101':
net = ResNetFlex101(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex152':
net = ResNetFlex152(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
        elif args.model == 'PreActResNet18':
            net = PreActResNet18()
elif args.model == 'GoogLeNet':
net = GoogLeNet(num_classes=args.num_classes)
elif args.model == 'LeNet':
net = LeNet(num_classes=args.num_classes)
elif args.model == 'DenseNet121':
net = DenseNet121(num_classes=args.num_classes)
elif args.model == 'DenseNet169':
net = DenseNet169(num_classes=args.num_classes)
elif args.model == 'DenseNet201':
net = DenseNet201(num_classes=args.num_classes)
elif args.model == 'DenseNet161':
net = DenseNet161(num_classes=args.num_classes)
elif args.model == 'ResNeXt29_2x64d':
net = ResNeXt29_2x64d(num_classes=args.num_classes)
elif args.model == 'MobileNet':
net = MobileNet(num_classes=args.num_classes)
elif args.model == 'MobileNetV2':
net = MobileNetV2(num_classes=args.num_classes)
elif args.model == 'DPN26':
net = DPN26(num_classes=args.num_classes)
elif args.model == 'DPN92':
net = DPN92(num_classes=args.num_classes)
elif args.model == 'ShuffleNetG2':
net = ShuffleNetG2(num_classes=args.num_classes)
elif args.model == 'ShuffleNetV2':
net = ShuffleNetV2(net_size=1, num_classes=args.num_classes)
elif args.model == 'SENet18':
net = SENet18(num_classes=args.num_classes)
elif args.model == 'EfficientNetB0':
net = EfficientNetB0(num_classes=args.num_classes)
elif args.model == 'RegNetX_200MF':
net = RegNetX_200MF(num_classes=args.num_classes)
elif args.model == 'RegNetX_400MF':
net = RegNetX_400MF(num_classes=args.num_classes)
elif args.model == 'RegNetY_400MF':
net = RegNetY_400MF(num_classes=args.num_classes)
elif args.model == 'PNASNetA':
net = PNASNetA()
elif args.model == 'AlexNet':
net = AlexNet(num_classes=args.num_classes)
elif args.model == 'MLP':
net = MLP(width=args.width, depth=args.depth)
elif args.model == 'CNN':
# below commented out by lf for ease of producing heat maps
net = CNN(num_filters=args.num_filters)
#net = CNN(num_filters=conv_helper(args.width, args.depth))
elif args.model == 'convnet':
net = convnet(width=args.conv_width, depth=args.conv_depth)
elif args.model == 'SENet18_DPN92':
net = SENet18_DPN92(num_classes=args.num_classes)
print('model ', args.model)
print('width_multiplier ', args.width_multiplier)
ctParams = count_parameters(net)
print('ctParams ', ctParams)
net = net.to(device)
print("torch.cuda.device_count() ", torch.cuda.device_count())
if torch.cuda.device_count() > 1:
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=0.9, weight_decay=args.decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [int(args.epochs/2), int(args.epochs * 3/4)])
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones =[150, 225], gamma =0.1)
test_acc = 0
time0 = time()
for epoch in range(args.epochs):
train_loss = train(train_cmllloader)
'''
if epoch % args.test_freq == 0:
test_acc = test(epoch)
'''
if epoch > 99 and epoch % 10 == 0:
train_accuracy = train_acc(train_cmllloader)
#test_acc, test_time = test()
print('\t\ttrain_acc: %.4f' %train_accuracy)
#print('\t\ttest_acc: %.4f' %test_acc)
if train_accuracy >= 99.9:
test_acc, test_time = test(testloader)
break
elif epoch > 199 and train_accuracy >= 99.5:
test_acc, test_time = test(testloader)
break
elif epoch > 299 and train_accuracy >= 99.0:
test_acc, test_time = test(testloader)
break
if epoch == (args.epochs - 1):
test_acc, test_time = test(testloader)
train_accuracy = train_acc(train_cmllloader)
scheduler.step()
if args.debug:
break
print('train_acc for CMLL', train_accuracy)
print('test_acc for CMLL', test_acc)
train_time = (time() - time0)/60
print('training time in mins ', train_time)
# saving this model:
print("fitting LA for the trainset")
la = Laplace(net, 'classification', subset_of_weights='all',
hessian_structure=args.hessian_structure, prior_precision=args.decay,
backend=AsdlEF)
la.fit(train_cmllloader)
print("done fitting LA for the small data")
epoch = 0
temp = args.init_temp
best_accuracy = 0
best_temp = temp
buffer = 0
max_buffer = 3
max_epochs = 50
while epoch < max_epochs:
bma_accuracy, bma_probs, all_ys = get_bma_acc(net, la, validloader, args.bma_nsamples, args.hessian_structure, temp=temp)
cmll = get_cmll(bma_probs, all_ys, eps=1e-4)
print("current temperate: {}, bma accuracy: {}, cmll: {}".format(temp, bma_accuracy, cmll))
if bma_accuracy > best_accuracy:
best_accuracy = bma_accuracy
best_temp = temp
temp /= 10.
buffer = 0
elif buffer < max_buffer:
buffer += 1
temp /= 5.
else:
break
epoch += 1
print("best temperate: {}, bma accuracy: {}".format(best_temp, best_accuracy))
# using the best temperature to get
bma_accuracy, bma_probs, all_ys = get_bma_acc(net,
la,
test_cmllloader,
args.bma_nsamples, args.hessian_structure, temp=best_temp)
cmll = get_cmll(bma_probs, all_ys, eps=1e-4)
print("cmll value (if nan, increase the initial temperature): ", cmll)
# train the full model
# Model
print('==> Building model for full training..')
if args.model == 'VGG19':
net = VGG('VGG19', num_classes=args.num_classes)
elif args.model == 'VGG11':
net = VGG('VGG11', num_classes=args.num_classes)
elif args.model == 'VGG13':
net = VGG('VGG13', num_classes=args.num_classes)
elif args.model == 'VGG16':
net = VGG('VGG16', num_classes=args.num_classes)
elif args.model == 'ResNet6':
net = ResNet6(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet8':
net = ResNet8(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet10':
net = ResNet10(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet12':
net = ResNet12(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet14':
net = ResNet14(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet16':
net = ResNet16(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNet18':
net = ResNet18(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex':
net = ResNetFlex(num_classes=args.num_classes, width_multiplier=args.width_multiplier, depth=args.depth)
elif args.model == 'ResNetFlex34':
net = ResNetFlex34(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex50':
net = ResNetFlex50(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex101':
net = ResNetFlex101(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
elif args.model == 'ResNetFlex152':
net = ResNetFlex152(num_classes=args.num_classes, width_multiplier=args.width_multiplier)
        elif args.model == 'PreActResNet18':
            net = PreActResNet18()
elif args.model == 'GoogLeNet':
net = GoogLeNet(num_classes=args.num_classes)
elif args.model == 'LeNet':
net = LeNet(num_classes=args.num_classes)
elif args.model == 'DenseNet121':
net = DenseNet121(num_classes=args.num_classes)
elif args.model == 'DenseNet169':
net = DenseNet169(num_classes=args.num_classes)
elif args.model == 'DenseNet201':
net = DenseNet201(num_classes=args.num_classes)
elif args.model == 'DenseNet161':
net = DenseNet161(num_classes=args.num_classes)
elif args.model == 'ResNeXt29_2x64d':
net = ResNeXt29_2x64d(num_classes=args.num_classes)
elif args.model == 'MobileNet':
net = MobileNet(num_classes=args.num_classes)
elif args.model == 'MobileNetV2':
net = MobileNetV2(num_classes=args.num_classes)
elif args.model == 'DPN26':
net = DPN26(num_classes=args.num_classes)
elif args.model == 'DPN92':
net = DPN92(num_classes=args.num_classes)
elif args.model == 'ShuffleNetG2':
net = ShuffleNetG2(num_classes=args.num_classes)
elif args.model == 'ShuffleNetV2':
net = ShuffleNetV2(net_size=1, num_classes=args.num_classes)
elif args.model == 'SENet18':
net = SENet18(num_classes=args.num_classes)
elif args.model == 'EfficientNetB0':
net = EfficientNetB0(num_classes=args.num_classes)
elif args.model == 'RegNetX_200MF':
net = RegNetX_200MF(num_classes=args.num_classes)
elif args.model == 'RegNetX_400MF':
net = RegNetX_400MF(num_classes=args.num_classes)
elif args.model == 'RegNetY_400MF':
net = RegNetY_400MF(num_classes=args.num_classes)
elif args.model == 'PNASNetA':
net = PNASNetA()
elif args.model == 'AlexNet':
net = AlexNet(num_classes=args.num_classes)
elif args.model == 'MLP':
net = MLP(width=args.width, depth=args.depth)
elif args.model == 'CNN':
# below commented out by lf for ease of producing heat maps
net = CNN(num_filters=args.num_filters)
#net = CNN(num_filters=conv_helper(args.width, args.depth))
elif args.model == 'convnet':
net = convnet(width=args.conv_width, depth=args.conv_depth)
elif args.model == 'SENet18_DPN92':
net = SENet18_DPN92(num_classes=args.num_classes)
print('model ', args.model)
print('width_multiplier ', args.width_multiplier)
ctParams = count_parameters(net)
print('ctParams ', ctParams)
net = net.to(device)
print("torch.cuda.device_count() ", torch.cuda.device_count())
if torch.cuda.device_count() > 1:
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
momentum=0.9, weight_decay=args.decay)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [int(args.epochs/2), int(args.epochs * 3/4)])
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones =[150, 225], gamma =0.1)
test_acc = 0
time0 = time()
for epoch in range(args.epochs):
train_loss = train(trainloader)
'''
if epoch % args.test_freq == 0:
test_acc = test(epoch)
'''
if epoch > 99 and epoch % 10 == 0:
train_accuracy = train_acc(trainloader)
#test_acc, test_time = test()
print('\t\ttrain_acc: %.4f' %train_accuracy)
#print('\t\ttest_acc: %.4f' %test_acc)
if train_accuracy >= 99.9:
test_acc, test_time = test(testloader)
break
elif epoch > 199 and train_accuracy >= 99.5:
test_acc, test_time = test(testloader)
break
elif epoch > 299 and train_accuracy >= 99.0:
test_acc, test_time = test(testloader)
break
if epoch == (args.epochs - 1):
test_acc, test_time = test(testloader)
train_accuracy = train_acc(trainloader)
scheduler.step()
if args.debug:
break
        print('train_acc for full model', train_accuracy)
        print('test_acc for full model', test_acc)
train_time = (time() - time0)/60
print('training time in mins ', train_time)
print("fitting LA for the trainset")
la = Laplace(net, 'classification', subset_of_weights='all',
hessian_structure=args.hessian_structure, prior_precision=args.decay,
backend=AsdlEF)
la.fit(trainloader)
print("done fitting LA for the small data")
# Getting the MLL on the fully trained model
mll = la.log_marginal_likelihood(prior_precision=args.decay).item()/len(trainset.data)
bma_test_accuracy, bma_test_probs, all_test_ys = get_bma_acc(net, la, testloader,
args.bma_nsamples,
args.hessian_structure,
temp=best_temp)
bma_test_ll = get_cmll(bma_test_probs, all_test_ys, eps=1e-4)
bma_test_acc_list.append(bma_test_accuracy)
bma_test_ll_list.append(bma_test_ll)
cmll_list.append(cmll)
mll_list.append(mll)
print("bma_test_ll: {}, bma_test_accuracy: {}, mll {} ".format(bma_test_ll, bma_test_accuracy, mll))
        save_dict = {
            'model': args.model, 'wid_multi': args.width_multiplier,
            'trainsize': args.trainsize, 'test_acc': test_acc,
            'train_acc': train_accuracy, 'train_loss': train_loss,
            'ctParams': ctParams, 'GPUs': torch.cuda.device_count(),
            'epochs': epoch, 'train_time': train_time, 'test_time': test_time,
            'time': strftime("%m/%d %H:%M", localtime()), 'data': args.dataset,
            'run_lbl': args.run_label, 'lr': args.lr, 'maxEpochs': args.epochs,
            'bma_test_acc': bma_test_accuracy, 'bma_test_ll': bma_test_ll,
            'cmll': cmll, 'mll': mll,
        }
fileName = args.dataset + args.runs_save_name
save_output_from_dict(save_dict, save_dir=args.save_path, save_name=fileName)
if train_accuracy >= 99.0:
num_good_runs = num_good_runs + 1
test_acc_list.append(test_acc)
global_test_acc += test_acc
global_train_accuracy += train_accuracy
else:
print("train_accuracy < 98.0\n")
if args.debug:
break
if num_good_runs < 1:
global_test_stdev = -1
else:
global_test_stdev = pstdev(test_acc_list)
global_test_acc /= num_good_runs
global_train_accuracy /= num_good_runs
avg_bma_test_acc = np.mean(bma_test_acc_list)
    avg_bma_test_ll = np.mean(bma_test_ll_list)
avg_cmll = np.mean(cmll_list)
    avg_mll = np.mean(mll_list)
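    # Assumed end-of-run summary (the excerpt truncates here): report the
    # averages computed above.
    print('avg_bma_test_acc {:.4f}, avg_bma_test_ll {:.4f}, '
          'avg_cmll {:.4f}, avg_mll {:.4f}'.format(
              avg_bma_test_acc, avg_bma_test_ll, avg_cmll, avg_mll))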
import motley
import numpy as np
from motley.table import Table
from numpy.lib.stride_tricks import as_strided
from scipy.stats import binned_statistic_2d
def table_coords(coo, ix_fit, ix_scale, ix_loc):
# TODO: maybe add flux estimate
# create table: coordinates
ocoo = np.array(coo[:, ::-1], dtype='O')
cootbl = Table(ocoo,
col_headers=list('xy'),
col_head_props=dict(bg='g'),
row_headers=range(len(coo)), # starts numbering from 0
# row_nrs=True,
align='>', # easier to read when right aligned
)
# add colour indicators for tracking / fitting / scaling info
ms = 2
m = np.zeros((len(coo) + 1, 3), int)
# m[0] = 1, 2, 3
for i, ix in enumerate((ix_fit, ix_scale, ix_loc)):
m[ix, i] = i + 1
# flag stars
cols = 'gbm'
labels = ('fit|', 'scale|', 'loc|')
tags = np.empty(m.shape, dtype='U1')
tags[m != 0] = 'x'
# tags[:] = 'x' * ms
col_headers = motley.rainbow(labels, bg=cols)
tt = Table(tags, title='\n', # title, # title_props=None,
col_headers=col_headers,
frame=False, align='^',
col_borders='', cell_whitespace=0)
tt.colourise(m, fg=cols)
# ts = tt.add_colourbar(str(tt), ('fit|', 'scale|', 'loc|'))
# join tables
tbl = Table([[str(cootbl), str(tt)]], frame=False, col_borders='')
return tbl
def table_cdist(sdist, window, _print=False):
# from scipy.spatial.distance import cdist
n = len(sdist)
# check for stars that are close together
# sdist = cdist(coo, coo) # pixel distance between stars
# sdist[np.tril_indices(n)] = np.inf
# since the distance matrix is symmetric, ignore lower half
# mask = (sdist == np.inf)
# create distance matrix as table, highlighting stars that are potentially
# too close together and may cause problems
bg = 'light green'
# tbldat = np.ma.array(sdist, mask=mask, copy=True)
tbl = Table(sdist, # tbldat,
title='Distance matrix',
col_headers=range(n),
row_headers=range(n),
col_head_props=dict(bg=bg),
row_head_props=dict(bg=bg),
align='>')
if sdist.size > 1:
# Add colour as distance warnings
c = np.zeros_like(sdist)
c += (sdist < window / 2)
c += (sdist < window)
tbl.colourise(c, *' yr')
tbl.show_colourbar = False
tbl.flag_headers(c, bg=[bg] * 3, fg='wyr')
if _print and n > 1:
print(tbl)
return tbl # , c
def rand_median(cube, ncomb, subset, nchoose=None):
"""
    Median-combine `ncomb` frames chosen at random from the first `nchoose`
    frames of the interval `subset`.
Parameters
----------
cube
ncomb
subset
nchoose
Returns
-------
"""
if isinstance(subset, int):
subset = (0, subset) # treat like a slice
i0, i1 = subset
if nchoose is None: # if not given, select from entire subset
nchoose = i1 - i0
# get frame indices
nfirst = min(nchoose, i1 - i0)
    ix = np.random.randint(i0, i0 + nfirst, ncomb)
    # median-combine the selected frames (assumed completion of the
    # truncated function body)
    return np.median(cube[ix], axis=0)
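# Hypothetical usage sketch (`cube` is an assumed (n_frames, H, W) array):
#
#     master = rand_median(cube, ncomb=10, subset=(0, 100))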
import numpy as np
from keras.utils import to_categorical
from keras import models
from keras import layers
from keras.datasets import imdb
(training_data, training_targets), (testing_data, testing_targets) = imdb.load_data(num_words=10000)
data = np.concatenate((training_data, testing_data), axis=0)
targets = np.concatenate((training_targets, testing_targets), axis=0)
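# Sketch of the usual next step for this dataset (assumed continuation, not
# part of the original excerpt): multi-hot encode the reviews so a dense
# network can consume them.
def vectorize(sequences, dim=10000):
    out = np.zeros((len(sequences), dim))
    for i, seq in enumerate(sequences):
        out[i, seq] = 1.0   # set the indices of the words present to 1
    return out

data = vectorize(data)
targets = np.array(targets).astype("float32")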
import unittest
import numpy as np
from pecanpy.graph import BaseGraph, AdjlstGraph, SparseGraph, DenseGraph
MAT = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]], dtype=float)
INDPTR = np.array([0, 2, 3, 4], dtype=np.uint32)
INDICES = np.array([1, 2, 0, 0], dtype=np.uint32)
DATA = np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)
ADJLST = [{1: 1.0, 2: 1.0}, {0: 1}, {0: 1}]
IDS = ["a", "b", "c"]
IDMAP = {"a": 0, "b": 1, "c": 2}
class TestBaseGraph(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.g = BaseGraph()
cls.g.set_ids(IDS)
def test_set_ids(self):
self.assertEqual(self.g.IDlst, IDS)
self.assertEqual(self.g.IDmap, IDMAP)
def test_properties(self):
self.assertEqual(self.g.num_nodes, 3)
with self.assertRaises(NotImplementedError):
self.assertEqual(self.g.num_edges, 4)
with self.assertRaises(NotImplementedError):
self.assertEqual(self.g.density, 2/3)
class TestAdjlstGraph(unittest.TestCase):
def test_from_mat(self):
g = AdjlstGraph.from_mat(MAT, IDS)
self.assertEqual(g._data, ADJLST)
self.assertEqual(g.IDlst, IDS)
def test_properties(self):
self.g = AdjlstGraph.from_mat(MAT, IDS)
self.assertEqual(self.g.num_nodes, 3)
self.assertEqual(self.g.num_edges, 4)
self.assertEqual(self.g.density, 2/3)
class TestSparseGraph(unittest.TestCase):
def tearDown(self):
del self.g
def validate(self):
        self.assertTrue(np.all(self.g.indptr == INDPTR))
        self.assertTrue(np.all(self.g.indices == INDICES))
        self.assertTrue(np.all(self.g.data == DATA))
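    def test_from_mat(self):
        # assumed test body, mirroring TestAdjlstGraph.test_from_mat above
        self.g = SparseGraph.from_mat(MAT, IDS)
        self.validate()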
import numpy as np
import cv2
class CoordGenerator(object):
def __init__(self, intrin, img_w, img_h):
super(CoordGenerator, self).__init__()
self.intrinsics = intrin
self.image_width = img_w
self.image_height = img_h
def pixel2local(self, depth): # depth: float32, meter.
cx, cy, fx, fy = self.intrinsics[0, 2], self.intrinsics[1, 2], self.intrinsics[0, 0], self.intrinsics[1, 1]
u_base = np.tile(np.arange(self.image_width), (self.image_height, 1))
v_base = np.tile(np.arange(self.image_height)[:, np.newaxis], (1, self.image_width))
X = (u_base - cx) * depth / fx
Y = (v_base - cy) * depth / fy
        coord_camera = np.stack((X, Y, depth), axis=2)
        return coord_camera
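# Minimal usage sketch (the intrinsic parameters are assumed values):
# back-project a constant 1 m depth map into camera-space XYZ coordinates.
if __name__ == '__main__':
    K = np.array([[525.0, 0.0, 319.5],
                  [0.0, 525.0, 239.5],
                  [0.0, 0.0, 1.0]])
    gen = CoordGenerator(K, img_w=640, img_h=480)
    xyz = gen.pixel2local(np.ones((480, 640), dtype=np.float32))
    print(xyz.shape)  # (480, 640, 3)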
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 2 11:05:11 2021
@author: furqanafzal
"""
#%%modules
import _ucrdtw
import os
path='/Users/furqanafzal/Documents/furqan/MountSinai/Research/Code/trakr'
os.chdir(path)
import numpy as np
# import matplotlib.pylab as plt
import modules
import importlib
importlib.reload(modules)
from modules import add_noise,standardize_data,cross_val_metrics
#%% load data
path='/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/trakr/neurips2022/data_results'
os.chdir(path)
X=np.load('permutedseqMNIST_alldigits.npy')
# X=standardize_data(X)
y=np.load('mnist_trakr_labels_alldigits.npy')
#%% performance and evaluation - metrics
# meanauc for window 5 is 0.5829566666666666
# svm
# meanauc for window 10 is 0.5866572222222222
# svm
# meanauc for window 20 is 0.5869033333333332
# svm
# meanauc for window 50 is 0.5846750000000001
# svm
# meanauc for window 100 is 0.5775111111111111
# svm
accuracymat=[]
aucmat=[]
for k in range(np.size(X,0)):
distmat=[]
print(f'On iteration {k}')
for i in range(np.size(X,0)):
loc, dist = _ucrdtw.ucrdtw(X[i,:], X[k,:] ,20, False)
distmat.append(dist)
# print(i)
distmat=np.array(distmat).reshape(-1,1)
accuracy,aucvec=cross_val_metrics(distmat,y,n_classes=10,splits=10)
accuracymat.append(accuracy),aucmat.append(aucvec)
#%%
# performance_metrics={'accuracy-svm':accuracy,'auc-svm':aucvec}
# performance_metrics=dict()
performance_metrics['accuracy-knn']=accuracymat
performance_metrics['auc-knn']=aucmat
#%%
import pickle
with open('/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/trakr/neurips2022/data_results/noisyinput_metrics_dtw_permutedmnist_noiselimitupto_5', 'wb') as f:
pickle.dump(metrics, f)
#%%
# with open('/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/trakr/neurips2022/data_results/metrics_trakr_mnist', 'rb') as f:
# loaded_dict = pickle.load(f)
#%%
################################################################
# Noisy Inputs
################################################################
#%%
path='/Users/furqanafzal/Documents/furqan/MountSinai/Research/ComputationalNeuro/trakr/neurips2022/data_results'
os.chdir(path)
X=np.load('permutedseqMNIST_alldigits.npy')
y=np.load('mnist_trakr_labels_alldigits.npy')
level=np.linspace(0,5,50)
metrics=dict()
# digits=[0,100,200,300,400,500,600,700,800,900]
digits=[0,500,900]
for loop in range(len(level)):
accuracymat=[]
aucmat=[]
X=np.load('permutedseqMNIST_alldigits.npy')
sigma=level[loop]
X=add_noise(X,sigma)
for k in digits:
distmat=[]
for i in range(np.size(X,0)):
loc, dist = _ucrdtw.ucrdtw(X[i,:], X[k,:] ,20, False)
distmat.append(dist)
        distmat = np.array(distmat).reshape(-1, 1)
        accuracy, aucvec = cross_val_metrics(distmat, y, n_classes=10, splits=10)
        accuracymat.append(accuracy), aucmat.append(aucvec)
import pandas as pd
import os
import numpy as np
import argparse
import warnings
parser = argparse.ArgumentParser('Bayes ratio and Brier score for histogram of two variables')
parser.add_argument('file', type=str,
metavar='DF',
help='Location where pkl file saved')
parser.add_argument('--nbins', type=int, default=100)
parser.add_argument('--yvar', type=str, default='model_entropy')
parser.add_argument('--xvar', type=str, default='rank')
parser.add_argument('--xbins', type=float, default=[], nargs='*')
parser.add_argument('--ybins', type=float, default=[], nargs='*')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--eps', type=float, default=0)
parser.add_argument('--K', type=int, default=10)
parser.add_argument('--exclude', type=int, default=[], nargs='*')
parser.set_defaults(show=True)
parser.set_defaults(save=False)
args = parser.parse_args()
np.random.seed(args.seed)
from common import labdict
print('X: %s, Y: %s'%(args.xvar, args.yvar))
df = pd.read_pickle(args.file)
df.drop(args.exclude)
Nsamples = len(df)
K = args.K
N = len(df)
Ix = np.random.permutation(N)
X_ = df[args.xvar]
Y_ = df[args.yvar]
EBR1 = []
EBR5 = []
#n = N//K
#ix = Ix[n*i:n*(i+1)]
#X = np.delete(X_.to_numpy(), ix)
#Y = np.delete(Y_.to_numpy(), ix)
X = X_[Ix]
Y = Y_[Ix]
Nbins = args.nbins
if len(args.ybins)==0:
Yc, Ybins = pd.qcut(Y,Nbins,retbins=True,duplicates='drop')
else:
Yc, Ybins = pd.cut(Y,args.ybins,retbins=True, duplicates='drop', right=False)
if len(args.xbins)==0:
Xc, Xbins = pd.qcut(X,Nbins,retbins=True,duplicates='drop')
else:
Xc, Xbins = pd.cut(X,args.xbins,retbins=True,duplicates='drop', right=False)
#Yvc = Yc.value_counts(sort=False)
#Xvc = Xc.value_counts(sort=False)
H, xe, ye = np.histogram2d(X, Y, bins=[Xbins, Ybins])
P = H/np.sum(H)
Ptop1 = df['top1'].sum()/len(df)
Ptop5 = df['top5'].sum()/len(df)
Otop1 = Ptop1/(1-Ptop1)
Otop5 = Ptop5/(1-Ptop5)
Py = P.sum(axis=0)
Ptop1xbins = P[Xbins[:-1]==0,:].reshape(-1)/Py
Brier1 = Ptop1xbins*(Ptop1xbins - 1)**2 + (1-Ptop1xbins)*Ptop1xbins**2
ix = np.arange(len(Ptop1xbins))
ix1 = Ptop1xbins==1
try:
lb = np.max(ix[ix1])+1
except ValueError as e:
lb = 0
Ptop1xbins[0:(lb+1)] = np.sum(Ptop1xbins[0:(lb+1)])/(lb+1)
ix0 = Ptop1xbins==0
try:
ub = np.min(ix[ix0])
except ValueError as e:
ub = len(Ptop1xbins)
Ptop1xbins[ub:] = np.sum(Ptop1xbins[ub:])/(len(Ptop1xbins)-ub+1)
Otop1xbins = Ptop1xbins/(1-Ptop1xbins+args.eps)
BR1 = Otop1xbins/Otop1
Ptop5xbins = P[Xbins[:-1]<5,:].sum(axis=0)/Py
Brier5 = Ptop5xbins*(Ptop5xbins - 1)**2 + (1-Ptop5xbins)*Ptop5xbins**2
ix5 = Ptop5xbins==1
try:
lb = np.max(ix[ix5])+1
except ValueError as e:
lb = 0
Ptop5xbins[0:(lb+1)] = np.sum(Ptop5xbins[0:(lb+1)])/(lb+1)
ix0 = Ptop5xbins==0
try:
ub = np.min(ix[ix0])
except ValueError as e:
ub = len(Ptop5xbins)
Ptop5xbins[ub:] = np.sum(Ptop5xbins[ub:])/(len(Ptop5xbins)-ub+1)
Otop5xbins = Ptop5xbins/(1-Ptop5xbins+args.eps)
BR5 = Otop5xbins/Otop5
BR1 = np.max([BR1,1/BR1],axis=0)
BR5 = np.max([BR5,1/BR5],axis=0)
EBR1.append(np.sum(Py*BR1))
EBR5.append(np.sum(Py*BR5))
print('E[Bayes ratio, top1] = %.3f'%np.mean(EBR1))
print('E[Bayes ratio, top5] = %.3f'%np.mean(EBR5))
"""
Tests for units.py
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
from astropy.units import Quantity, UnitsError, UnitConversionError
import numpy as np
import pytest
from hsr4hci.units import (
flux_ratio_to_magnitudes,
InstrumentUnitsContext,
magnitude_to_flux_ratio,
)
# -----------------------------------------------------------------------------
# TEST CASES
# -----------------------------------------------------------------------------
def test__instrument_units_context() -> None:
"""
Test `hsr4hci.units.InstrumentUnitsContext`.
"""
# Case 1 (illegal constructor argument: pixscale)
with pytest.raises(UnitsError) as units_error:
InstrumentUnitsContext(
pixscale=Quantity(0.0271, 'arcsec'),
lambda_over_d=Quantity(0.096, 'arcsec'),
)
assert "Argument 'pixscale' to function" in str(units_error)
# Case 2 (illegal constructor argument: lambda_over_d)
with pytest.raises(UnitsError) as units_error:
InstrumentUnitsContext(
pixscale=Quantity(0.0271, 'arcsec / pixel'),
lambda_over_d=Quantity(0.096, 'gram'),
)
assert "Argument 'lambda_over_d' to function" in str(units_error)
instrument_units_context = InstrumentUnitsContext(
pixscale=Quantity(0.0271, 'arcsec / pixel'),
lambda_over_d=Quantity(0.096, 'arcsec'),
)
# Case 3 (conversion from pixel to arcsec / lambda_over_d)
with instrument_units_context:
quantity = Quantity(1.0, 'pixel')
assert quantity.to('arcsec').value == 0.0271
assert quantity.to('lambda_over_d').value == 0.28229166666666666
# Case 4 (context is re-usable)
with instrument_units_context:
quantity = Quantity(1.0, 'pixel')
assert quantity.to('arcsec').value == 0.0271
assert quantity.to('lambda_over_d').value == 0.28229166666666666
# Case 5 (context is local; conversions do not work outside the context)
with pytest.raises(UnitConversionError) as unit_conversion_error:
_ = quantity.to('arcsec').value
assert "'pix' and 'arcsec' (angle) are not" in str(unit_conversion_error)
# Case 6 (conversion from arcsec to pixel / lambda_over_d)
with instrument_units_context:
quantity = Quantity(1.0, 'arcsec')
assert quantity.to('pixel').value == 36.90036900369004
assert quantity.to('lambda_over_d').value == 10.416666666666666
# Case 7 (conversion from lambda_over_d to arcsec / pixel)
with instrument_units_context:
quantity = Quantity(1.0, 'lambda_over_d')
assert quantity.to('arcsec').value == 0.096
assert quantity.to('pixel').value == 3.5424354243542435
# Case 8 (contexts can be overwritten / re-defined)
instrument_units_context = InstrumentUnitsContext(
pixscale=Quantity(0.271, 'arcsec / pixel'),
lambda_over_d=Quantity(0.96, 'arcsec'),
)
with instrument_units_context:
quantity = Quantity(1.0, 'pixel')
assert quantity.to('arcsec').value == 0.271
assert quantity.to('lambda_over_d').value == 0.2822916666666667
# Case 9 (different contexts can co-exist)
context_a = InstrumentUnitsContext(
pixscale=Quantity(0.0271, 'arcsec / pixel'),
lambda_over_d=Quantity(0.096, 'arcsec'),
)
context_b = InstrumentUnitsContext(
pixscale=Quantity(0.271, 'arcsec / pixel'),
lambda_over_d=Quantity(0.96, 'arcsec'),
)
quantity = Quantity(1.0, 'pixel')
with context_a:
assert quantity.to('arcsec').value == 0.0271
with context_b:
assert quantity.to('arcsec').value == 0.271
def test__flux_ratio_to_magnitudes() -> None:
"""
Test `hsr4hci.units.flux_ratio_to_magnitudes`.
"""
assert flux_ratio_to_magnitudes(100) == -5
assert np.allclose(
flux_ratio_to_magnitudes(np.array([100, 0.01])), np.array([-5, 5])
)
def test__magnitude_to_flux_ratio() -> None:
"""
Test `hsr4hci.units.magnitude_to_flux_ratio`.
"""
assert magnitude_to_flux_ratio(-5) == 100
assert np.allclose(
        magnitude_to_flux_ratio(np.array([-5, 5])), np.array([100, 0.01])
    )
import numpy as np
import torch
from utils.bbox_tools import loc2bbox, bbox_iou, bbox2loc
class ProposalCreator:
"""
    Generate proposal RoIs by calling an instance of this class.
"""
def __init__(self,
rpn_model,
nms_thresh=0.7,
n_train_pre_nms=12000, # on training mode: keep top-n1 bboxes before NMS
n_train_post_nms=2000, # on training mode: keep top-n2 bboxes after NMS
n_test_pre_nms=6000, # on test mode: keep top-n3 bboxes before NMS
n_test_post_nms=300, # on test mode: keep top-n4 bboxes after NMS
min_size=16
):
self.rpn_model = rpn_model
self.nms_thresh = nms_thresh
self.n_train_pre_nms = n_train_pre_nms
self.n_train_post_nms = n_train_post_nms
self.n_test_pre_nms = n_test_pre_nms
self.n_test_post_nms = n_test_post_nms
self.min_size = min_size
def __call__(self, loc, score, anchor, img_size, scale=1.):
"""input should be ndarray
Propose RoIs.
Inputs :obj:`loc, score, anchor` refer to the same anchor when indexed
by the same index.
On notations, :math:`R` is the total number of anchors. This is equal
to product of the height and the width of an image and the number of
anchor bases per pixel.
Type of the output is same as the inputs.
Args:
loc (array): Predicted offsets and scaling to anchors.
Its shape is :math:`(R, 4)`.
score (array): Predicted foreground probability for anchors.
Its shape is :math:`(R,)`.
anchor (array): Coordinates of anchors. Its shape is
:math:`(R, 4)`.
img_size (tuple of ints): A tuple :obj:`height, width`,
which contains image size after scaling.
scale (float): The scaling factor used to scale an image after
reading it from a file.
Returns:
array:
An array of coordinates of proposal boxes.
Its shape is :math:`(S, 4)`. :math:`S` is less than
:obj:`self.n_test_post_nms` in test time and less than
:obj:`self.n_train_post_nms` in train time. :math:`S` depends on
the size of the predicted bounding boxes and the number of
bounding boxes discarded by NMS.
"""
# NOTE: when test, remember
# r_fcn.eval()
# to set self.traing = False
if self.rpn_model.training:
n_pre_nms = self.n_train_pre_nms
n_post_nms = self.n_train_post_nms
else:
n_pre_nms = self.n_test_pre_nms
n_post_nms = self.n_test_post_nms
# Convert the anchors to the ROIs
rois = loc2bbox(anchor, loc)
# clip rois
rois[:, slice(0, 4, 2)] = np.clip(
rois[:, slice(0, 4, 2)], 0, img_size[0])
rois[:, slice(1, 4, 2)] = np.clip(
rois[:, slice(1, 4, 2)], 0, img_size[1])
# remove small rois
min_size = self.min_size * scale
hs = rois[:, 2] - rois[:, 0] # height
ws = rois[:, 3] - rois[:, 1] # width
keep = np.where((hs >= min_size) & (ws >= min_size))[0]
rois = rois[keep, :]
score = score[keep]
# sorted by score
# get topN anchors to NMS, e.g.N=12000(training),6000(testing)
        order = score.ravel().argsort()[::-1]  # [::-1] reverses to descending order
if n_pre_nms > 0:
order = order[:n_pre_nms] # shape:(n_pre_nms, )
rois = rois[order, :]
score = score[order]
keep = torch.ops.torchvision.nms(
torch.from_numpy(rois).cuda(),
torch.from_numpy(score).cuda(),
self.nms_thresh
)
if n_post_nms > 0:
keep = keep[:n_post_nms]
rois = rois[keep.cpu().numpy()]
# rois_score = score[keep.cpu().numpy()]
return rois
class ProposalTargetCreator(object):
"""Assign ground truth bounding boxes to given RoIs.
The :meth:`__call__` of this class generates training targets
for each object proposal.
This is used to train Faster RCNN [#]_.
.. [#] <NAME>, <NAME>, <NAME>, <NAME>. \
Faster R-CNN: Towards Real-Time Object Detection with \
Region Proposal Networks. NIPS 2015.
Args:
n_sample (int): The number of sampled regions.
pos_ratio (float): Fraction of regions that is labeled as a
foreground.
pos_iou_thresh (float): IoU threshold for a RoI to be considered as a
foreground.
neg_iou_thresh_hi (float): RoI is considered to be the background
if IoU is in
[:obj:`neg_iou_thresh_hi`, :obj:`neg_iou_thresh_hi`).
neg_iou_thresh_lo (float): See above.
"""
def __init__(self,
n_sample=128,
pos_ratio=0.25, pos_iou_thresh=0.5,
neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.0):
self.n_sample = n_sample
self.pos_ratio = pos_ratio
self.pos_iou_thresh = pos_iou_thresh
self.neg_iou_thresh_hi = neg_iou_thresh_hi
self.neg_iou_thresh_lo = neg_iou_thresh_lo
def __call__(self, roi, bbox, label,
loc_normalize_mean=(0., 0., 0., 0.),
loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
"""Assigns ground truth to sampled proposals.
This function samples total of :obj:`self.n_sample` RoIs
from the combination of :obj:`roi` and :obj:`bbox`.
The RoIs are assigned with the ground truth class labels as well as
bounding box offsets and scales to match the ground truth bounding
boxes. As many as :obj:`pos_ratio * self.n_sample` RoIs are
sampled as foregrounds.
Offsets and scales of bounding boxes are calculated using
:func:`model.utils.bbox_tools.bbox2loc`.
Also, types of input arrays and output arrays are same.
Here are notations.
* :math:`S` is the total number of sampled RoIs, which equals \
:obj:`self.n_sample`.
* :math:`L` is number of object classes possibly including the \
background.
Args:
roi (array): Region of Interests (RoIs) from which we sample.
Its shape is :math:`(R, 4)`
bbox (array): The coordinates of ground truth bounding boxes.
Its shape is :math:`(R', 4)`.
label (array): Ground truth bounding box labels. Its shape
is :math:`(R',)`. Its range is :math:`[0, L - 1]`, where
:math:`L` is the number of foreground classes.
loc_normalize_mean (tuple of four floats): Mean values to normalize
coordinates of bouding boxes.
loc_normalize_std (tupler of four floats): Standard deviation of
the coordinates of bounding boxes.
Returns:
(array, array, array):
* **sample_roi**: Regions of interests that are sampled. \
Its shape is :math:`(S, 4)`.
* **gt_roi_loc**: Offsets and scales to match \
the sampled RoIs to the ground truth bounding boxes. \
Its shape is :math:`(S, 4)`.
* **gt_roi_label**: Labels assigned to sampled RoIs. Its shape is \
:math:`(S,)`. Its range is :math:`[0, L]`. The label with \
value 0 is the background.
"""
# get numbers of bbox
n_bbox, _ = bbox.shape
# Join GT bboxes
roi = np.concatenate((roi, bbox), axis=0)
# Preset number of positive samples
pos_roi_per_image = np.round(self.n_sample * self.pos_ratio)
# get IOU between roi and bbox
iou = bbox_iou(roi, bbox)
# argmax index of each ROI
gt_assignment = iou.argmax(axis=1)
# max IOU of each ROI
max_iou = iou.max(axis=1)
# label of each ROI, the positive label start with 1
gt_roi_label = label[gt_assignment] + 1
# positive ROIs
pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]
pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))
if pos_index.size > 0:
pos_index = np.random.choice(
pos_index, size=pos_roi_per_this_image, replace=False
)
# negative ROIs
neg_index = np.where((max_iou < self.neg_iou_thresh_hi) &
(max_iou >= self.neg_iou_thresh_lo))[0]
# the number of negative ROIs
neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image
neg_roi_per_this_image = int(min(neg_roi_per_this_image, neg_index.size))
if neg_index.size > 0:
neg_index = np.random.choice(
neg_index, size=neg_roi_per_this_image, replace=False
)
keep_index = np.append(pos_index, neg_index)
gt_roi_label = gt_roi_label[keep_index]
gt_roi_label[pos_roi_per_this_image:] = 0 # set the lable of neg ROIs to zero
sample_roi = roi[keep_index]
gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])
gt_roi_loc = ((gt_roi_loc - np.array(loc_normalize_mean, np.float32)) /
np.array(loc_normalize_std, np.float32))
return sample_roi, gt_roi_loc, gt_roi_label
class AnchorTargetCreator(object):
"""
Assign the ground truth bounding boxes to anchors.
params:
        n_sample: int, the number of sampled anchors
        pos_iou_thresh: float, an anchor is positive if its IoU with a gt bbox > pos_iou_thresh
        neg_iou_thresh: float, an anchor is negative if its max IoU with all gt bboxes < neg_iou_thresh
        pos_ratio: float, n_sample_pos / n_sample
"""
def __init__(self,
n_sample=256,
pos_iou_thresh=0.7, neg_iou_thresh=0.3,
pos_ratio=0.5):
self.n_sample = n_sample
self.pos_iou_thresh = pos_iou_thresh
self.neg_iou_thresh = neg_iou_thresh
self.pos_ratio = pos_ratio
def __call__(self, gt_bbox, anchor, img_size):
"""Assign ground truth supervision to sampled subset of anchors.
Types of input arrays and output arrays are same.
Here are notations.
* :math:`S` is the number of anchors.
* :math:`R` is the number of bounding boxes.
Args:
            gt_bbox (array): Coordinates of ground truth bounding boxes.
                Its shape is :math:`(R, 4)`.
anchor (array): Coordinates of anchors. Its shape is
:math:`(S, 4)`.
img_size (tuple of ints): A tuple :obj:`H, W`, which
is a tuple of height and width of an image.
Returns:
(array, array):
#NOTE: it's scale not only offset
* **loc**: Offsets and scales to match the anchors to \
the ground truth bounding boxes. Its shape is :math:`(S, 4)`.
* **label**: Labels of anchors with values \
:obj:`(1=positive, 0=negative, -1=ignore)`. Its shape \
is :math:`(S,)`.
"""
img_H, img_W = img_size
n_anchor = len(anchor)
# Get the index of anchors completely inside the image, e.g. array[0, 1, 3, 6]
inside_index = _get_inside_index(anchor, img_H, img_W)
anchor = anchor[inside_index]
# shape: (inside_anchor_num, ) & (inside_anchor_num, )
achor_argmax_ious, anchor_label = self._create_label(anchor, gt_bbox)
# compute bounding box regression targets
anchor_loc = bbox2loc(anchor, gt_bbox[achor_argmax_ious]) # shape:(inside_anchor_num, 4)
# map up to original set of anchors
anchor_label = _unmap(anchor_label, n_anchor, inside_index, fill=-1) # shape:(n_anchor, )
anchor_loc = _unmap(anchor_loc, n_anchor, inside_index, fill=0) # shape:(n_anchor, 4)
return anchor_loc, anchor_label
def _create_label(self, anchor, gt_bbox):
# label: 1 is positive, 0 is negative, -1 is dont care
anchor_label = np.empty((anchor.shape[0], ), dtype=np.int32) # shape:(inside_anchor_num, 4)
        anchor_label.fill(-1)  # initialize all anchor labels to -1 (ignore)
anchor_argmax_ious, anchor_max_ious, gt_argmax_ious = self._calc_ious(anchor, gt_bbox)
'''assign labels'''
# assign negative labels first so that positive labels can clobber them
anchor_label[anchor_max_ious < self.neg_iou_thresh] = 0
# positive label: for each gt, anchor with highest iou
anchor_label[gt_argmax_ious] = 1
# positive label: above threshold IOU
anchor_label[anchor_max_ious >= self.pos_iou_thresh] = 1
# subsample positive labels if we have too many
n_pos = int(self.pos_ratio * self.n_sample)
pos_index = np.where(anchor_label == 1)[0]
if len(pos_index) > n_pos:
disable_index = np.random.choice(
pos_index, size=(len(pos_index) - n_pos),
replace=False
)
anchor_label[disable_index] = -1 # reset to initial value (skip)
# subsample negative labels if we have too many
n_neg = self.n_sample - np.sum(anchor_label == 1)
neg_index = np.where(anchor_label == 0)[0]
if len(neg_index) > n_neg:
disable_index = np.random.choice(
neg_index, size=(len(neg_index) - n_neg),
replace=False
)
anchor_label[disable_index] = -1
return anchor_argmax_ious, anchor_label
def _calc_ious(self, anchor, gt_bbox):
# ious between the anchors and the gt boxes
ious = bbox_iou(anchor, gt_bbox)
anchor_size, gt_bbox_size = ious.shape
anchor_argmax_ious = ious.argmax(axis=1)
anchor_max_ious = ious[np.arange(anchor_size), anchor_argmax_ious]
gt_argmax_ious = ious.argmax(axis=0)
        gt_max_ious = ious[gt_argmax_ious, np.arange(gt_bbox_size)]
        # keep every anchor that ties the per-gt max IoU (assumed completion,
        # matching the standard Faster R-CNN reference implementation)
        gt_argmax_ious = np.where(ious == gt_max_ious)[0]
        return anchor_argmax_ious, anchor_max_ious, gt_argmax_ious
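# The module-private helpers referenced above are outside this excerpt.
# Minimal sketches with the standard Faster R-CNN semantics follow (assumed
# reconstructions, not the verbatim originals).
def _get_inside_index(anchor, H, W):
    # indices of anchors that lie completely inside the image
    return np.where(
        (anchor[:, 0] >= 0) &
        (anchor[:, 1] >= 0) &
        (anchor[:, 2] <= H) &
        (anchor[:, 3] <= W)
    )[0]

def _unmap(data, count, index, fill=0):
    # scatter `data`, defined on `index`, back into an array of length `count`
    if data.ndim == 1:
        ret = np.empty((count,), dtype=data.dtype)
        ret.fill(fill)
        ret[index] = data
    else:
        ret = np.empty((count,) + data.shape[1:], dtype=data.dtype)
        ret.fill(fill)
        ret[index, :] = data
    return ret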
from blenderneuron.section import Section
import numpy as np
import math
class BlenderSection(Section):
def __init__(self):
super(BlenderSection, self).__init__()
self.was_split = False
self.split_sections = []
def from_full_NEURON_section_dict(self, nrn_section_dict):
self.name = nrn_section_dict["name"]
self.nseg = nrn_section_dict["nseg"]
self.point_count = nrn_section_dict["point_count"]
self.coords = nrn_section_dict["coords"]
self.radii = nrn_section_dict["radii"]
self.parent_connection_loc = nrn_section_dict["parent_connection_loc"]
self.connection_end = nrn_section_dict["connection_end"]
# Parse the children
self.children = []
for nrn_child in nrn_section_dict["children"]:
child = BlenderSection()
child.from_full_NEURON_section_dict(nrn_child)
self.children.append(child)
self.segments_3D = []
if "activity" in nrn_section_dict:
self.activity.from_dict(nrn_section_dict["activity"])
def make_split_sections(self, max_length):
"""
Splits a section into smaller chained sub-sections if the arc length of the points
exceeds the specified length. This is used to temporarily split the sections for
confining dendrites between layers.
:param max_length: maximum allowed section length in um
:return: None
"""
arc_lengths = self.arc_lengths()
total_length = arc_lengths[-1]
num_sections = int(math.ceil(total_length / max_length))
is_too_long = num_sections > 1
if not is_too_long:
return None
        # Mark the section as having been split
self.was_split = True
# Get the maximum length of the new sections
new_length = total_length / num_sections
# Create new sections
self.split_sections = [BlenderSection() for i in range(num_sections)]
old_coords = np.array(self.coords).reshape((-1, 3))
old_radii = np.array(self.radii)
# Split the coords and radii
split_length = 0
point_i = 0
for split_sec_i, split_sec in enumerate(self.split_sections):
split_length += new_length
split_sec_coords = []
split_sec_radii = []
# Start a 2nd+ split section with the most recent point
if split_sec_i > 0:
prev_sec = self.split_sections[split_sec_i-1]
split_sec_coords.append(prev_sec.coords[-1])
split_sec_radii.append(prev_sec.radii[-1])
exact_length_match = False
# Add 3d points to the split until reached the end of the split
while arc_lengths[point_i] <= split_length:
split_sec_coords.append(old_coords[point_i])
split_sec_radii.append(old_radii[point_i])
exact_length_match = abs(arc_lengths[point_i] - split_length) < 0.001
point_i += 1
if point_i == len(arc_lengths):
break
# If reached the end of the sub-section, but the last real sub-section point is not
# at the exact end of the sub-section, then create a virtual point, which
# lies at the exact end of the sub-section
if not exact_length_match:
virtual_arc_length_delta = split_length - arc_lengths[point_i-1]
pt_segment_arc_length_delta = arc_lengths[point_i] - arc_lengths[point_i - 1]
pt_segment_vector = old_coords[point_i] - old_coords[point_i-1]
fraction_along = virtual_arc_length_delta / pt_segment_arc_length_delta
virtual_coord = old_coords[point_i-1] + pt_segment_vector * fraction_along
pt_segment_radius_delta = old_radii[point_i] - old_radii[point_i-1]
virtual_radius = old_radii[point_i-1] + pt_segment_radius_delta * fraction_along
split_sec_coords.append(virtual_coord)
split_sec_radii.append(virtual_radius)
split_sec.coords = np.array(split_sec_coords)
split_sec.radii = np.array(split_sec_radii)
split_sec.point_count = len(split_sec.radii)
split_sec.name = self.name + "["+str(split_sec_i)+"]"
return self.split_sections
def update_coords_from_split_sections(self):
if not self.was_split:
return
# Reassemble the coords and radii, skipping identical consecutive points
prev_coord, prev_radius = None, None
coords, radii = [], []
for split_i, split_sec in enumerate(self.split_sections):
for coord_i, coord in enumerate(np.reshape(split_sec.coords, (-1, 3))):
radius = split_sec.radii[coord_i]
# Skip if identical to previous point
if prev_coord is not None and radius == prev_radius and \
np.all(np.isclose(coord, prev_coord, rtol=0.001)):
continue
else:
coords.append(coord)
radii.append(radius)
prev_coord = coord
prev_radius = radius
        self.coords = np.array(coords)
        # (reconstructed: the radii collected above are stored alongside coords)
        self.radii = np.array(radii)
        self.point_count = len(radii)
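# --- Hedged sketch (assumption): arc_lengths() used by make_split_sections()
# above is not shown in this excerpt; a minimal version returning cumulative
# distances along the section's 3D points would be:
def arc_lengths_sketch(coords_flat):
    pts = np.array(coords_flat).reshape((-1, 3))        # flat [x0, y0, z0, x1, ...] to (N, 3)
    seg = np.linalg.norm(np.diff(pts, axis=0), axis=1)  # per-segment lengths
    return np.concatenate(([0.0], np.cumsum(seg)))      # cumulative arc length per point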
# Say "Hello, World!" With Python
if __name__ == '__main__':
print("Hello, World!")
# Python If-Else
if __name__ == '__main__':
n = int(input().strip())
if n % 2 == 0:
if (n >= 2 and n <= 5) or n > 20:
print("Not Weird")
else:
print("Weird")
else:
print("Weird")
# Arithmetic Operators
if __name__ == '__main__':
a = int(input())
b = int(input())
print(a + b)
print(a - b)
print(a * b)
# Python: Division
if __name__ == '__main__':
a = int(input())
b = int(input())
print(a//b)
print(a/b)
# Loops
if __name__ == '__main__':
n = int(input())
i = 0
while (i < n):
print(i * i)
i += 1
# Write a function
def is_leap(year):
leap = False
# Write your logic here
if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
leap = True
return leap
year = int(input())
print(is_leap(year))
# Print Function
if __name__ == '__main__':
n = int(input())
    for i in range(1, n + 1):
print(i, end="")
# List Comprehensions
if __name__ == '__main__':
x = int(input())
y = int(input())
z = int(input())
n = int(input())
    combos = []  # avoid shadowing the built-in list
    for i in range(x + 1):
        for j in range(y + 1):
            for k in range(z + 1):
                if (i + j + k != n):
                    combos.append([i, j, k])
    print(combos)
# Find the Runner-Up Score!
if __name__ == '__main__':
n = int(input())
arr = map(int, input().split())
array = list(arr)
i = max(array)
while (i == max(array)):
array.remove(max(array))
print(max(array))
# Nested Lists
if __name__ == '__main__':
d = {}
for _ in range(int(input())):
name = input()
score = float(input())
d[name] = score
v = d.values()
second = sorted(list(set(v)))[1]
second_list = []
for key, value in d.items():
if (value == second):
second_list.append(key)
second_list.sort()
for i in second_list:
print(i)
# Finding the percentage
from decimal import Decimal
if __name__ == '__main__':
n = int(input())
student_marks = {}
for _ in range(n):
name, *line = input().split()
scores = list(map(float, line))
student_marks[name] = scores
query_name = input()
scores_list = student_marks[query_name]
    sums = sum(scores_list)
    avg = Decimal(sums / len(scores_list))
print(round(avg, 2))
# Lists
if __name__ == '__main__':
N = int(input())
l = []
for _ in range(N):
s = input().split()
cmd = s[0]
args = s[1:]
if cmd != "print":
cmd += "(" + ",".join(args) + ")"
eval("l." + cmd)
else:
print(l)
# Tuples
if __name__ == '__main__':
n = int(input())
integer_list = map(int, input().split())
t = tuple(integer_list)
print(hash(t))
# sWAP cASE
def swap_case(s):
ret = ""
for l in s:
if (l.islower()):
ret += l.upper()
else:
ret += l.lower()
return ret
if __name__ == '__main__':
s = input()
result = swap_case(s)
print(result)
# String Split and Join
def split_and_join(line):
res = ""
res_arr = line.split(" ")
res = "-".join(res_arr)
return res
if __name__ == '__main__':
line = input()
result = split_and_join(line)
print(result)
# What's Your Name?
def print_full_name(a, b):
print("Hello " + a + " " + b + "! You just delved into python.")
if __name__ == '__main__':
first_name = input()
last_name = input()
print_full_name(first_name, last_name)
# Mutations
def mutate_string(string, position, character):
res = string[:position] + character + string[position + 1:]
return res
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
# Find a string
def count_substring(string, sub_string):
res = 0
for i in range(len(string)):
if (string[i:i + len(sub_string)] == sub_string):
res += 1
return res
if __name__ == '__main__':
string = input().strip()
sub_string = input().strip()
count = count_substring(string, sub_string)
print(count)
# String Validators
if __name__ == '__main__':
s = input()
a = "False"
b = "False"
c = "False"
d = "False"
e = "False"
for car in s:
if (car.isalnum()):
a = "True"
if (car.isalpha()):
b = "True"
if (car.isdigit()):
c = "True"
if (car.islower()):
d = "True"
if (car.isupper()):
e = "True"
print(a + "\n" + b + "\n" + c + "\n" + d + "\n" + e)
# Text Alignment
thickness = int(input())
c = 'H'
# top arrow
for i in range(thickness):
print((c * i).rjust(thickness - 1) + c + (c * i).ljust(thickness - 1))
for i in range(thickness + 1):
print((c * thickness).center(thickness * 2) + (c * thickness).center(thickness * 6))
for i in range((thickness + 1) // 2):
print((c * thickness * 5).center(thickness * 6))
for i in range(thickness + 1):
print((c * thickness).center(thickness * 2) + (c * thickness).center(thickness * 6))
# bottom arrow
for i in range(thickness):
print(((c * (thickness - i - 1)).rjust(thickness) + c + (c * (thickness - i - 1)).ljust(thickness)).rjust(
thickness * 6))
# Text Wrap
import textwrap
def wrap(string, max_width):
sol = textwrap.fill(string, max_width)
return sol
if __name__ == '__main__':
string, max_width = input(), int(input())
result = wrap(string, max_width)
print(result)
# Designer Door Mat
rows, cols = map(int, input().split())
middle = rows // 2 + 1
for i in range(1, middle):
cen = (i * 2 - 1) * ".|."
print(cen.center(cols, "-"))
print("WELCOME".center(cols, "-"))
for i in reversed(range(1, middle)):
cen = (i * 2 - 1) * ".|."
print(cen.center(cols, "-"))
# String Formatting
def print_formatted(number):
width = len("{0:b}".format(n))
for i in range(1, number + 1):
print("{0:{width_}d} {0:{width_}o} {0:{width_}X} {0:{width_}b}".format(i, width_=width))
if __name__ == '__main__':
n = int(input())
print_formatted(n)
# Alphabet Rangoli
import string
def print_rangoli(size):
arr_string = string.ascii_lowercase
R = []
for i in range(size):
cen = "-".join(arr_string[i:size])
R.append((cen[::-1] + cen[1:]).center(4 * size - 3, "-"))
print('\n'.join(R[:0:-1] + R))
if __name__ == '__main__':
n = int(input())
print_rangoli(n)
# Capitalize!
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
def solve(s):
    # str.replace would touch every occurrence of a token; capitalize the first
    # letter of each space-separated word instead (runs of spaces are preserved)
    return ' '.join(w[:1].upper() + w[1:] for w in s.split(' '))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = solve(s)
fptr.write(result + '\n')
fptr.close()
# The Minion Game
def minion_game(string):
vow = "AEIOU"
kev = 0
stu = 0
for i in range(len(string)):
if string[i] in vow:
kev += (len(string) - i)
else:
stu += (len(string) - i)
if kev > stu:
print("Kevin", kev)
elif kev < stu:
print("Stuart", stu)
else:
print("Draw")
if __name__ == '__main__':
s = input()
minion_game(s)
# Merge the Tools!
import textwrap
def merge_the_tools(string, k):
u = []
len_u = 0
for c in string:
len_u += 1
if c not in u:
u.append(c)
if len_u == k:
print("".join(u))
u = []
len_u = 0
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k)
# Introduction to Sets
def average(array):
s = set(array)
avg = (sum(s) / len(s))
return avg
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
# Symmetric Difference
m, M = (int(input()), input().split())
n, N = (int(input()), input().split())
x = set(M)
y = set(N)
diff_yx = y.difference(x)
diff_xy = x.difference(y)
un = diff_yx.union(diff_xy)
print('\n'.join(sorted(un, key=int)))
# No Idea!
nm = list(map(int, input().split()))
arr = list(map(int, input().split()))
a = list(map(int, input().split()))
b = list(map(int, input().split()))
A = set(a)
B = set(b)
happy = 0
for num in arr:
if num in A:
happy += 1
if num in B:
happy -= 1
print(happy)
# Set .add()
N = int(input())
s = set()
for i in range(N):
s.add(input())
print(len(s))
# Set .discard(), .remove() & .pop()
n = int(input())
s = set(map(int, input().split()))
N = int(input())
for i in range(N):
cmd = input().split()
if cmd[0] == "pop" and len(s) != 0:
s.pop()
elif cmd[0] == "remove" and int(cmd[1]) in s:
s.remove(int(cmd[1]))
elif cmd[0] == "discard":
s.discard(int(cmd[1]))
print(sum(s))
# Set .union() Operation
_, a = input(), set(input().split())
_, b = input(), set(input().split())
print(len(a.union(b)))
# Set .intersection() Operation
_, a = input(), set(input().split())
_, b = input(), set(input().split())
print(len(a.intersection(b)))
# Set .difference() Operation
_, a = input(), set(input().split())
_, b = input(), set(input().split())
print(len(a.difference(b)))
# Set .symmetric_difference() Operation
_, a = input(), set(input().split())
_, b = input(), set(input().split())
print(len(a.symmetric_difference(b)))
# Set Mutations
_, A = int(input()), set(map(int, input().split()))
B = int(input())
for _ in range(B):
command, newSet = input().split()[0], set(map(int, input().split()))
getattr(A, command)(newSet)
print(sum(A))
# The Captain's Room
n, arr = int(input()), list(map(int, input().split()))
myset = set(arr)
sum1 = sum(myset) * n
sum2 = sum(arr)
print((sum1 - sum2) // (n - 1))
# Check Subset
n = int(input())
for _ in range(n):
x, a, z, b = input(), set(input().split()), input(), set(input().split())
print(a.issubset(b))
# Check Strict Superset
a = set(input().split())
count = 0
n = int(input())
for i in range(n):
b = set(input().split())
if a.issuperset(b):
count += 1
print(count == n)
# collections.Counter()
from collections import Counter
numShoes = int(input())
shoes = Counter(map(int, input().split()))
numCust = int(input())
income = 0
for i in range(numCust):
size, price = map(int, input().split())
if shoes[size]:
income += price
shoes[size] -= 1
print(income)
# DefaultDict Tutorial
from collections import defaultdict
d = defaultdict(list)
list1 = []
n, m = map(int, input().split())
for i in range(0, n):
d[input()].append(i + 1)
for i in range(0, m):
list1 = list1 + [input()]
for i in list1:
if i in d:
print(" ".join(map(str, d[i])))
else:
print(-1)
# Collections.namedtuple()
from collections import namedtuple
n = int(input())
a = input()
total = 0
Student = namedtuple('Student', a)
for _ in range(n):
student = Student(*input().split())
total += int(student.MARKS)
print('{:.2f}'.format(total / n))
# Collections.OrderedDict()
from collections import OrderedDict
d = OrderedDict()
N = int(input())
for _ in range(N):
item, space, quantity = input().rpartition(' ')
d[item] = d.get(item, 0) + int(quantity) # add quantity to the item
for item, quantity in d.items():
print(item, quantity)
# Word Order
from collections import OrderedDict
d = OrderedDict()
n = int(input())
for _ in range(n):
word = input()
d[word] = d.get(word, 0) + 1
print(len(d))
print(*d.values())
# Collections.deque()
from collections import deque
d = deque()
for _ in range(int(input())):
method, *n = input().split()
getattr(d, method)(*n)
print(*d)
# Company Logo
from collections import Counter
string = Counter(sorted(input()))
for c in string.most_common(3):
print(*c)
# Piling Up!
from collections import deque
T = int(input())
for _ in range(T):
_, queue = input(), deque(map(int, input().split()))
for cube in reversed(sorted(queue)):
if queue[-1] == cube:
queue.pop()
elif queue[0] == cube:
queue.popleft()
else:
print('No')
break
else:
print('Yes')
# Calendar Module
import calendar
m, d, y = map(int, input().split())
days = {0: 'MONDAY', 1: 'TUESDAY', 2: 'WEDNESDAY', 3: 'THURSDAY', 4: 'FRIDAY', 5: 'SATURDAY', 6: 'SUNDAY'}
print(days[calendar.weekday(y, m, d)])
# Time Delta
from datetime import datetime as dt
format_ = '%a %d %b %Y %H:%M:%S %z'
T = int(input())
for i in range(T):
t1 = dt.strptime(input(), format_)
t2 = dt.strptime(input(), format_)
print(int(abs((t1 - t2).total_seconds())))
# Exceptions
T = int(input())
for i in range(T):
try:
a, b = map(int, input().split())
print(a // b)
except Exception as e:
print("Error Code:", e)
# Zipped!
N, X = map(int, input().split())
sheet = []
for _ in range(X):
marks = map(float, input().split())
sheet.append(marks)
for i in zip(*sheet):
print(sum(i) / len(i))
# Athlete Sort
N, M = map(int, input().split())
nums = []
for i in range(N):
list_ = list(map(int, input().split()))
nums.append(list_)
K = int(input())
nums.sort(key=lambda x: x[K])
for line in nums:
print(*line, sep=' ')
# ginortS
low = []
up = []
odd = []
ev = []
S = input()
for i in sorted(S):
if i.isalpha():
if i.isupper():
x = up
else:
x = low
else:
if (int(i) % 2):
x = odd
else:
x = ev
x.append(i)
print("".join(low + up + odd + ev))
# Map and Lambda Function
cube = lambda x: pow(x, 3)
def fibonacci(n):
fib = [0, 1]
for i in range(2, n):
fib.append(fib[i - 2] + fib[i - 1])
return fib[0:n]
if __name__ == '__main__':
n = int(input())
print(list(map(cube, fibonacci(n))))
# Detect Floating Point Number
import re
n = int(input())
for i in range(n):
isnum = input()
print(bool(re.match(r'^[-+]?[0-9]*\.[0-9]+$', isnum)))
# Re.split()
regex_pattern = r"[.,]+"
import re
print("\n".join(re.split(regex_pattern, input())))
# Group(), Groups() & Groupdict()
import re
s = input().strip()
m = re.search(r'([a-zA-Z0-9])\1+', s)
if m:
print(m.group(1))
else:
print(-1)
# Re.findall() & Re.finditer()
import re
v = "aeiou"
c = "qwrtypsdfghjklzxcvbnm"
m = re.findall(r"(?<=[%s])([%s]{2,})[%s]" % (c, v, c), input(), flags=re.I)
if m:
print("\n".join(m))
else:
print("\n".join(["-1"]))
# Re.start() & Re.end()
import re
s = input()
k = input()
pattern = re.compile(k)
r = pattern.search(s)
if not r:
print("(-1, -1)")
while r:
print("({0}, {1})".format(r.start(), r.end() - 1))
r = pattern.search(s, r.start() + 1)
# Regex Substitution
n = int(input())
for _ in range(n):
line = input()
while " && " in line or " || " in line:
line = line.replace(" && ", " and ").replace(" || ", " or ")
print(line)
# Validating Roman Numerals
regex_pattern = r"M{0,3}(C[MD]|D?C{0,3})(X[CL]|L?X{0,3})(I[VX]|V?I{0,3})$" # Do not delete 'r'.
import re
print(str(bool(re.match(regex_pattern, input()))))
# Validating phone numbers
import re
N = int(input())
for i in range(N):
line = input()
if re.match(r"[789]\d{9}$", line):
print("YES")
else:
print("NO")
# Validating and Parsing Email Addresses
import re
n = int(input())
for _ in range(n):
name, email = input().split(' ')
m = re.match(r"<[A-Za-z](\w|-|\.|_)+@[A-Za-z]+\.[A-Za-z]{1,3}>", email)
if m:
print(name, email)
# Hex Color Code
import re
regex = r":?.(#[0-9a-fA-F]{6}|#[0-9a-fA-F]{3})"
N = int(input())
for _ in range(N):
line = input()
match = re.findall(regex, line)
if match:
print(*match, sep='\n')
# HTML Parser - Part 1
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print('Start :', tag)
for ele in attrs:
print('->', ele[0], '>', ele[1])
def handle_endtag(self, tag):
print('End :', tag)
def handle_startendtag(self, tag, attrs):
print('Empty :', tag)
for ele in attrs:
print('->', ele[0], '>', ele[1])
N = int(input())
parser = MyHTMLParser()
for _ in range(N):
line = input()
parser.feed(line)
# HTML Parser - Part 2
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_comment(self, comment):
if '\n' in comment:
print('>>> Multi-line Comment')
else:
print('>>> Single-line Comment')
print(comment)
def handle_data(self, data):
if data == '\n': return
print('>>> Data')
print(data)
parser = MyHTMLParser()
N = int(input())
html_string = ""
for i in range(N):
html_string += input().rstrip() + '\n'
parser.feed(html_string)
parser.close()
# Detect HTML Tags, Attributes and Attribute Values
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print(tag)
[print('-> {} > {}'.format(*attr)) for attr in attrs]
parser = MyHTMLParser()
N = int(input())
for i in range(N):
line = input()
html = '\n'.join([line])
parser.feed(html)
parser.close()
# Validating UID
import re
T = int(input())
for _ in range(T):
uid = ''.join(sorted(input()))
try:
assert re.search(r'[A-Z]{2}', uid)
assert re.search(r'\d\d\d', uid)
assert not re.search(r'[^a-zA-Z0-9]', uid)
assert not re.search(r'(.)\1', uid)
assert len(uid) == 10
except:
print('Invalid')
else:
print('Valid')
# Validating Credit Card Numbers
import re
N = int(input())
for _ in range(N):
line = input()
if re.match(r"^[456]([\d]{15}|[\d]{3}(-[\d]{4}){3})$", line) and not re.search(r"([\d])\1\1\1",
line.replace("-", "")):
print("Valid")
else:
print("Invalid")
# Validating Postal Codes
regex_integer_in_range = r"^[1-9][\d]{5}$" # Do not delete 'r'.
regex_alternating_repetitive_digit_pair = r"(\d)(?=\d\1)" # Do not delete 'r'.
import re
P = input()
print(bool(re.match(regex_integer_in_range, P))
and len(re.findall(regex_alternating_repetitive_digit_pair, P)) < 2)
# Matrix Script
#!/bin/python3
import math
import os
import random
import re
import sys
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
matrix = []
b = ""
for _ in range(n):
matrix_item = input()
matrix.append(matrix_item)
for z in zip(*matrix):
b += "".join(z)
regex = re.sub(r"(?<=\w)([^\w]+)(?=\w)", " ", b)
print(regex)
# XML 1 - Find the Score
import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
sum_ = 0
for child in node.iter():
sum_ += (len(child.attrib))
return sum_
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
# XML2 - Find the Maximum Depth
import xml.etree.ElementTree as etree
maxdepth = 0
def depth(elem, level):
global maxdepth
level += 1
if (level >= maxdepth):
maxdepth = level
for child in elem:
depth(child, level)
if __name__ == '__main__':
n = int(input())
xml = ""
for i in range(n):
xml = xml + input() + "\n"
tree = etree.ElementTree(etree.fromstring(xml))
depth(tree.getroot(), -1)
print(maxdepth)
# Standardize Mobile Number Using Decorators
def wrapper(f):
def fun(l):
phone = []
for n in l:
phone.append('+91 {} {}'.format(n[-10:-5], n[-5:]))
f(phone)
return fun
@wrapper
def sort_phone(l):
print(*sorted(l), sep='\n')
if __name__ == '__main__':
l = [input() for _ in range(int(input()))]
sort_phone(l)
# Decorators 2 - Name Directory
import operator
def person_lister(f):
def inner(people):
fun = lambda x: int(x[2])
return map(f, sorted(people, key=fun))
return inner
@person_lister
def name_format(person):
return ("Mr. " if person[3] == "M" else "Ms. ") + person[0] + " " + person[1]
if __name__ == '__main__':
people = [input().split() for i in range(int(input()))]
print(*name_format(people), sep='\n')
# Arrays
import numpy
def arrays(arr):
array_ = arr[::-1]
return numpy.array(array_, float)
arr = input().strip().split(' ')
result = arrays(arr)
print(result)
# Shape and Reshape
import numpy
arr = input().split()
array_ = numpy.array(arr, int).reshape(3, 3)
print(array_)
# Transpose and Flatten
import numpy
n, m = map(int, input().split())
arr = []
for _ in range(n):
arr.append(input().strip().split())
array = numpy.array(arr, int)
print(array.transpose())
print(array.flatten())
# Concatenate
import numpy
a, b, c = map(int, input().split())
arrA_ = []
for _ in range(a):
arrA_.append(input().split())
arrB_ = []
for _ in range(b):
arrB_.append(input().split())
arrA = numpy.array(arrA_, int)
arrB = numpy.array(arrB_, int)
print(numpy.concatenate((arrA, arrB), axis=0))
# Zeros and Ones
import numpy
tup = map(int, input().split())
nums = tuple(tup)
zero = numpy.zeros(nums, dtype=int)  # numpy.int was removed in NumPy 1.24
one = numpy.ones(nums, dtype=int)
print(zero)
print(one)
# Eye and Identity
import numpy
n, m = (map(int, input().split()))
print(str(numpy.eye(n, m, k=0)).replace('0', ' 0').replace('1', ' 1'))
# Array Mathematics
import numpy as np
n, m = map(int, input().split())
a = np.zeros((n, m), int)
b = np.zeros((n, m), int)
for i in range(n):
a[i] = np.array(input().split(), int)
for i in range(n):
b[i] = np.array(input().split(), int)
print(a + b)
print(a - b)
print(a * b)
print(np.array(a / b, int))
print(a % b)
print(a ** b)
# Floor, Ceil and Rint
import numpy
numpy.set_printoptions(sign=' ')
arr = input().split()
a = numpy.array(arr, float)
print(numpy.floor(a))
print(numpy.ceil(a))
print(numpy.rint(a))
# Sum and Prod
import numpy as np
n, m = map(int, input().split())
array = []
for i in range(n):
array.append(input().split())
ar = np.array(array, int)
summ = np.sum(ar, axis=0)
print(np.prod(summ))
#!/usr/bin/env python
#-*- coding: utf-8 -*-
""""""
import os
import glob
import random
from PIL import Image
import cv2
import numpy as np
import tensorflow as tf
import sys
import xml.etree.ElementTree as ET
import scipy.misc  # used by Data.merge_image (scipy.misc.imread needs scipy < 1.2; imageio.imread is the modern substitute)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
try:
xrange
except:
xrange = range
class Config():
def __init__(self):
        '''
        Training I-LR patches (837*574*3) are cropped from the label I-HR
        (4600*3348*3); each crop is (837*574*3), generating sub-sets at a
        1:32 ratio.
        '''
self.is_train = True
self.factor_1 = 3.59375
self.factor_2 = 3.26953125
self.image_w = 837
self.image_h = 574
self.data_root = "E:/Larisv/Larisv-pre/data/"
self.train_data = "E:/Larisv/Larisv-pre/data/traindata/original-face/"
self.train_data_crop = "E:/Larisv/Larisv-pre/data/traindata-crop/"
self.train_label_crop = "E:/Larisv/Larisv-pre/data/trainlabel-crop/"
self.test_data = "E:/Larisv/Larisv-pre/data/testdata/"
self.test_data_crop = "E:/Larisv/Larisv-pre/data/testdata-crop/"
self.index = 0
class Data():
def __init__(self):
pass
def prepare_data(self,sess,config):
"""返回图片文件列表"""
if config.is_train:
print("config.train_data",config.train_data)
filenames = os.listdir(config.train_data)
data_dir = os.path.join(os.getcwd(), config.train_data)
data = glob.glob(os.path.join(data_dir, "*.jpg"))
print("[info] Javice: 当前数据集路径 ",data_dir)
print("[info] Javice: 返回数据集中的样本列表 {0},数据集规模为 {1}".format(data,len(data)))
else:
data_dir = os.path.join(os.sep, (os.path.join(os.getcwd(), config.test_data)), "original-face")
data = glob.glob(os.path.join(data_dir, "*.jpg"))
print("[info] Javice: 当前数据集路径 ",data_dir)
print("[info] Javice: 返回数据集中的样本列表 {0},数据集规模为 {1}".format(data,len(data)))
return data
def imread(self,path,config):
        '''Read the image at the given path and return it as an array.'''
if config.is_train:
image = cv2.imread(path,cv2.IMREAD_UNCHANGED)
return image
else:
image = cv2.imread(path,cv2.IMREAD_UNCHANGED)
h,w,c = image.shape
th = int(config.factor_1*h)
tw = int(config.factor_2*w)
image = cv2.resize(image,(tw,th))
return image
def input_setup(self,sess,config):
if config.is_train:
data = self.prepare_data(sess,config)
else:
data = self.prepare_data(sess,config)
if config.is_train:
for i in xrange(len(data)):
input_= self.imread(data[i],config)
if len(input_.shape) == 3: #print(len(input_.shape) == 3) True
h, w, c = input_.shape
# print("[info] Javice: the image.shape is ",input_.shape)
o_counter = 0
nx = ny = 0
for x in range(0, h-config.image_h+1, config.image_h):
nx += 1 ; ny =0
for y in range(0, w-config.image_w+1, config.image_w):
ny += 1
# print("[info] Javice: x = {0}, y= {1}".format(x,y))
sub_input = input_[x:x+config.image_h, y:y+config.image_w,0:c]
sub_input_ = cv2.GaussianBlur(sub_input,(7,7),sigmaX=5.5)
sub_input = sub_input.reshape([config.image_h, config.image_w, 3])
sub_input_ = sub_input_.reshape([config.image_h, config.image_w, 3])
o_counter += 1
self.imsave(sub_input_,config.train_data_crop+"{0}_{1}.jpg".format(i+1,o_counter))
self.imsave(sub_input,config.train_label_crop+"{0}_{1}.jpg".format(i+1,o_counter))
print("[info] Javice: config.train_data_crop path: ",config.train_data_crop+"{0}_{1}.jpg".format(i+1,o_counter))
# print("[info] Javice: 原始图片子图片nx = {0},子图片ny = {1},共有 = {2}:".format(nx,ny,o_counter))
else:
input_ = self.imread(data[config.index],config)
if len(input_.shape) == 3: #print(len(input_.shape) == 3) True
h, w, c = input_.shape
counter = 0
nx = ny = 0
for x in range(0, h-config.image_h+1, config.image_h):
nx += 1; ny = 0
for y in range(0, w-config.image_w+1, config.image_w):
ny += 1
sub_input = input_[x:x+config.image_h, y:y+config.image_w,0:3]
sub_input = sub_input.reshape([config.image_h, config.image_w, 3])
counter += 1
self.imsave(sub_input,config.test_data_crop+"{0}.jpg".format(counter))
print("[info] Javice: config.test_data_crop: ",config.test_data_crop+"{0}.jpg".format(counter))
print("[info] Javice: 原始图片子图片nx = {0},子图片ny = {1},共有 = {2}张子图: {3}".format(nx,ny,counter,nx*ny == counter))
def imsave(self,image, path):
return cv2.imwrite(path,image,[int(cv2.IMWRITE_JPEG_QUALITY),100])
# if not config.is_train:
# return nx, ny
# 制成tfrecord文件
def save_to_tfrecord(self,cd_list,label,tfrecord_path):
'''
index 0 cats D:/Charben/_Datasets/dogcat/train_data/cats/
index 1 dogs D:/Charben/_Datasets/dogcat/train_data/dogs/
image size : 64,64,3
'''
writer = tf.python_io.TFRecordWriter(tfrecord_path+"train.tfrecord")
for x,y in zip(cd_list,label):
print(x,y)
image = Image.open(x)
# half_the_width = image.size[0]/2
# half_the_height = image.size[1]/2
# image = image.crop(
# (
# half_the_width - 112,
# half_the_height - 112,
# half_the_width + 112,
# half_the_height + 112,
# )
# )
image = image.resize((64,64))
            img_raw = image.tobytes()  # convert to raw bytes
example = tf.train.Example(
features = tf.train.Features(
feature = {
'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[y])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
}
)
)
writer.write(example.SerializeToString())
print("[info]: 数据转化完成")
writer.close()
# 解析tfrecord文件
def parse_tfrecord(self,tfrecord_path,batch_size):
'''
return:
image : Tensor("sub:0", shape=(224, 224, 3), dtype=float32),
image*1/255 -0.5
class:Tensor("Cast_1:0", shape=(), dtype=int32)
'''
filename_queue = tf.train.string_input_producer([tfrecord_path],shuffle=False)
reader = tf.TFRecordReader()
_,serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features = {
'label': tf.FixedLenFeature([],tf.int64),
'img_raw': tf.FixedLenFeature([],tf.string),
}
)
image,label = features['img_raw'],features['label']
decode_img = tf.decode_raw(image,tf.uint8)
decode_img = tf.reshape(decode_img,[224,224,3])
decode_img = tf.cast(decode_img,tf.float32)*(1./255) - 0.5
label = tf.cast(label,tf.int32)
# print("[info] : 解析后数据:{0},类标:{1}".format(decode_img,label))
return decode_img,label
    # return feed_dict parameters
def feed_dict(self,tfrecord_path,batch_size):
        img, label = self.parse_tfrecord(tfrecord_path, batch_size)
img_batch,label_batch = tf.train.batch([img,label],batch_size=batch_size,capacity=256,num_threads=64)
#print(label_batch,type(label_batch))
#Tensor("shuffle_batch:1", shape=(100,), dtype=int32)
return img_batch,label_batch
    # merge the cropped I-HR patches back into the complete I-HR image
def merge_image(self,config):
new_image = []
crop_image = os.listdir(config.test_data_crop)
for image in crop_image:
print("[info] Javice: image",image)
            image = scipy.misc.imread(os.path.join(config.test_data_crop, image)).astype(float)
new_image.append(image)
print(len(new_image))
nifb1 = new_image[0]
nifb2 = new_image[4]
nifb3 = new_image[8]
nifb4 = new_image[12]
nifb5 = new_image[16]
for i in range(1,4):
            nifb1 = np.concatenate((nifb1, new_image[i]), axis=1)
from __future__ import division, print_function
import numpy as np
from functools import partial
def shape_gallery(shape, Number_of_Nodes, *args, **kwargs):
if shape == 'sphere':
# Constants and nodes
phi = (1 + np.sqrt(5)) / 2
N = Number_of_Nodes // 2
radius = kwargs.get('radius')
quadrature_nodes = np.zeros((Number_of_Nodes, 3))
nodes_normal = np.zeros((Number_of_Nodes, 3))
# Fill nodes
for i in range(-N, N):
lat = np.arcsin((2.0 * i) / (2 * N + 1))
lon = (i % phi) * 2 * np.pi / phi
if lon < -np.pi:
lon = 2 * np.pi + lon
elif lon > np.pi:
lon = lon - 2 * np.pi
quadrature_nodes[i + N, :] = [np.cos(lon) * np.cos(lat), np.sin(lon) * np.cos(lat), np.sin(lat)]
quadrature_nodes *= radius
# Define level surface and gradient
def h(p):
return p[:, 0] * p[:, 0] \
+ p[:, 1] * p[:, 1] \
+ p[:, 2] * p[:, 2] \
- radius * radius
def gradh(p):
return 2 * p
# Compute surface normal at nodes
nodes_normal = gradh(quadrature_nodes)
nodes_normal /= np.linalg.norm(nodes_normal, axis=1, keepdims=True)
elif shape == 'ellipsoid':
# Constants and nodes
phi = (1 + np.sqrt(5)) / 2
N = Number_of_Nodes // 2
a = kwargs.get('a')
b = kwargs.get('b')
c = kwargs.get('c')
quadrature_nodes = np.zeros((Number_of_Nodes, 3))
nodes_normal = np.zeros((Number_of_Nodes, 3))
# Fill nodes
for i in range(-N, N):
lat = np.arcsin((2.0 * i) / (2 * N + 1))
lon = (i % phi) * 2 * np.pi / phi
if lon < -np.pi:
lon = 2 * np.pi + lon
elif lon > np.pi:
lon = lon - 2 * np.pi
            quadrature_nodes[i + N, :] = [a * np.cos(lon) * np.cos(lat), b * np.sin(lon) * np.cos(lat), c * np.sin(lat)]
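# --- Hedged note (the row above is truncated in the source): by symmetry with
# the sphere branch, the ellipsoid case presumably continues with the level
# surface h(p) = (x/a)**2 + (y/b)**2 + (z/c)**2 - 1, its gradient
# gradh(p) = 2 * p / [a**2, b**2, c**2], and the same normalization of normals.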
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
from collections import deque
import cv2
import glob
import pickle
import os
# set image pipeline mode
try:
import debug_ctrl
debug_ctrl.IMG_MODE
except (ModuleNotFoundError, NameError):
# default mode
IMG_MODE = False
else:
# external configuration
IMG_MODE = debug_ctrl.IMG_MODE
finally:
# init debug support
if IMG_MODE:
debug_folder = 'debug_outputs/'
output_folder = 'output_images/'
debug_cam_cal = debug_folder + 'camera_cal/'
if not os.path.exists(debug_cam_cal):
os.makedirs(debug_cam_cal)
# set image pipeline constants
try:
import lane_finding_const
lane_finding_const.src
lane_finding_const.dst
lane_finding_const.ym_per_pix
lane_finding_const.xm_per_pix
lane_finding_const.N_LP
lane_finding_const.N_MISS
except (ModuleNotFoundError, NameError):
# default values
# perspective transform source points
src = np.float32([[568, 470], [718, 470], [1110, 720], [210, 720]])
# perspective transform destination points
dst = np.float32([[300, 0], [980, 0], [980, 720], [300, 720]])
# pixels space to meters
ym_per_pix = 30 / 720
xm_per_pix = 3.7 / 700
# queue depth and miss count for reset
N_LP = 5
N_MISS = 5
else:
# external configuration
src = lane_finding_const.src
dst = lane_finding_const.dst
ym_per_pix = lane_finding_const.ym_per_pix
xm_per_pix = lane_finding_const.xm_per_pix
N_LP = lane_finding_const.N_LP
N_MISS = lane_finding_const.N_MISS
# init camera matrix and undistortion coefficients
mtx = None
dist = None
# init buffer that stores last N_LP good lane fitting coefficients
lane_l_q = deque([np.array([0, 0, 0])] * N_LP, maxlen=N_LP)
lane_r_q = deque([np.array([0, 0, 0])] * N_LP, maxlen=N_LP)
# count for missed frames
lane_queue_cnt = 0
bad_frame_cnt = 0
def camera_calibration(sample_images, ncol, nrow, isDebug=False):
# prepare object points
objp = np.zeros((ncol * nrow, 3), np.float32)
objp[:, :2] = np.mgrid[:ncol, :nrow].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane
# Make a list of calibration images
images = glob.glob(sample_images)
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (ncol, nrow), None)
# If found, add object points, image points
if ret:
objpoints.append(objp)
imgpoints.append(corners)
if isDebug:
# Draw and display the corners
cv2.drawChessboardCorners(img, (ncol, nrow), corners, ret)
cv2.imwrite(os.path.join(os.getcwd(), debug_folder, fname), img)
# load a sample image
img = cv2.imread(images[0])
# get image sizes
img_size = (img.shape[1], img.shape[0])
# Do camera calibration
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
if isDebug:
# save the objpoints, imgpoints, camera matrix and distortion coefficients
cam_pickle = {}
cam_pickle["objpoints"] = objpoints
cam_pickle["imgpoints"] = imgpoints
cam_pickle["mtx"] = mtx
cam_pickle["dist"] = dist
pickle.dump(cam_pickle, open(os.path.join(os.getcwd(), debug_folder + 'cam_pickle.p'), "wb"))
# undistort on an image
dst = cv2.undistort(img, mtx, dist, None, mtx)
# save camera calibration output image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
ax1.imshow(img)
ax1.set_title('Original Image')
ax2.imshow(dst)
ax2.set_title('Undistorted Image')
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
plt.savefig(os.path.join(os.getcwd(), output_folder, 'undistort.jpg'))
return mtx, dist
def thresholded_binary(image, isDebug=False):
# HLS color space
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
# HLS color threshold on S channel
s_channel = hls[:, :, 2]
s_thresh = (160, 255)
s_img_binary = np.zeros_like(s_channel)
s_img_binary[(s_channel > s_thresh[0]) & (s_channel < s_thresh[1])] = 1
# Gradients
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# reduce noise with Gaussian Blur
gaussian_kernel = 3
blur_gray = cv2.GaussianBlur(gray, (gaussian_kernel, gaussian_kernel), 0)
# Calculate the x and y gradients
sobel_kernel = 3
sobelx = np.absolute(cv2.Sobel(blur_gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(blur_gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
# Rescale back to 8 bit integer
scaled_sobel = np.uint8(255 * sobelx / np.max(sobelx))
# Create a copy and apply the threshold
sob_thresh = (20, 150)
sob_img_binary = np.zeros_like(scaled_sobel)
sob_img_binary[(scaled_sobel > sob_thresh[0]) & (scaled_sobel < sob_thresh[1])] = 1
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx ** 2 + sobely ** 2)
# Rescale to 8 bit
scale_factor = np.max(gradmag) / 255
gradmag = (gradmag / scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
mag_thresh = (20, 200)
mag_img_binary = np.zeros_like(gradmag)
mag_img_binary[(gradmag > mag_thresh[0]) & (gradmag < mag_thresh[1])] = 1
# Take the absolute value of the gradient direction,
# apply a threshold, and create a binary image result
absgraddir = np.arctan2(sobely, sobelx)
dir_thresh = (0.8, 1.1)
dir_img_binary = np.zeros_like(absgraddir)
dir_img_binary[(absgraddir > dir_thresh[0]) & (absgraddir < dir_thresh[1])] = 1
# Combine the binary thresholds
gradient_comb_img_binary = np.zeros_like(gray)
gradient_comb_img_binary[(sob_img_binary == 1) & (mag_img_binary == 1) & (dir_img_binary == 1)] = 1
# Combine the color transform and gradient to create a thresholded binary
comb_img_binary = np.zeros_like(gray)
comb_img_binary[(s_img_binary == 1) | (gradient_comb_img_binary == 1)] = 1
if isDebug:
# save thresolded binary image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
ax1.imshow(image)
ax1.set_title('Undistorted Image')
ax2.imshow(comb_img_binary, cmap='gray')
ax2.set_title('Thresholded Binary Image')
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
plt.savefig(os.path.join(os.getcwd(), output_folder, 'binary_combo.jpg'))
# save detailed binary images
f, axes = plt.subplots(2, 4, figsize=(24, 9))
axes[0, 0].imshow(image)
axes[0, 0].set_title('Undistorted Image')
axes[0, 1].imshow(gray, cmap='gray')
axes[0, 1].set_title('Grayscale Image')
axes[0, 2].imshow(s_img_binary, cmap='gray')
axes[0, 2].set_title('Thresholded S Channel')
axes[1, 0].imshow(sob_img_binary, cmap='gray')
axes[1, 0].set_title('Thresholded X-Gradient')
axes[1, 1].imshow(mag_img_binary, cmap='gray')
axes[1, 1].set_title('Thresholded Gradient Magnitude')
axes[1, 2].imshow(dir_img_binary, cmap='gray')
axes[1, 2].set_title('Thresholded Gradient Direction')
axes[1, 3].imshow(gradient_comb_img_binary, cmap='gray')
axes[1, 3].set_title('Combined Thresholded Gradient')
axes[0, 3].imshow(comb_img_binary, cmap='gray')
axes[0, 3].set_title('Final Thresholded Binary')
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
plt.savefig(os.path.join(os.getcwd(), 'debug_outputs/binary_combo_detail.jpg'))
return comb_img_binary
def perspective_transform(binary_image, src=src, dst=dst, isDebug=False):
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# Warp the image using OpenCV
warped = cv2.warpPerspective(binary_image, M, binary_image.shape[::-1])
if isDebug:
# draw line overlay in red for visual checking
color = [255, 0, 0]
thickness = 5
src_t = tuple(map(tuple, src))
color_combo = cv2.cvtColor(binary_image * 255, cv2.COLOR_GRAY2RGB)
cv2.line(color_combo, src_t[0], src_t[3], color, thickness)
cv2.line(color_combo, src_t[1], src_t[2], color, thickness)
dst_t = tuple(map(tuple, dst))
color_warped = cv2.cvtColor(warped * 255, cv2.COLOR_GRAY2RGB)
cv2.line(color_warped, dst_t[0], dst_t[3], color, thickness)
cv2.line(color_warped, dst_t[1], dst_t[2], color, thickness)
# save warped images
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
ax1.imshow(color_combo)
ax1.set_title('Thresholded Binary Image')
ax2.imshow(color_warped)
ax2.set_title('Warped Image')
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
plt.savefig(os.path.join(os.getcwd(), output_folder, 'birds_eye.jpg'))
# save transform matrix
trans_pickle = {}
trans_pickle["M"] = M
trans_pickle["Minv"] = Minv
pickle.dump(trans_pickle, open(os.path.join(os.getcwd(), debug_folder + 'trans_pickle.p'), "wb"))
return warped, M, Minv
def _fit_poly(img_shape, leftx, lefty, rightx, righty):
# Fit a second order polynomial to each lane
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
# Calc both polynomials using ploty, left_fit and right_fit
left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
return left_fitx, right_fitx, ploty
def _search_around_poly(binary_warped, ym_per_pix=ym_per_pix, xm_per_pix=xm_per_pix, isDebug=False):
# Choose the width of the margin around the previous polynomial to search
margin = 40
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Set the area of search based on activated x-values
# within the +/- margin of our polynomial function
left_fit_pre_avg = _lane_fit_weighted_average(lane_l_q)
right_fit_pre_avg = _lane_fit_weighted_average(lane_r_q)
left_lane_inds = ((nonzerox > (left_fit_pre_avg[0] * (nonzeroy ** 2) + left_fit_pre_avg[1] * nonzeroy +
left_fit_pre_avg[2] - margin)) & (nonzerox < (left_fit_pre_avg[0] * (nonzeroy ** 2) +
left_fit_pre_avg[1] * nonzeroy + left_fit_pre_avg[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit_pre_avg[0] * (nonzeroy ** 2) + right_fit_pre_avg[1] * nonzeroy +
right_fit_pre_avg[2] - margin)) & (nonzerox < (right_fit_pre_avg[0] * (nonzeroy ** 2) +
right_fit_pre_avg[1] * nonzeroy + right_fit_pre_avg[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# get departure
    midpoint = int(binary_warped.shape[1] // 2)  # np.int was removed in NumPy 1.24
leftx_base = np.average(leftx)
rightx_base = np.average(rightx)
midlane = rightx_base - leftx_base
departure = np.around((midpoint - midlane) * xm_per_pix, decimals=2)
if isDebug:
# Fit new polynomials
left_fitx, right_fitx, ploty = _fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
if isDebug:
return leftx, lefty, rightx, righty, departure, result
else:
return leftx, lefty, rightx, righty, departure
def _find_lane_pixels(binary_warped, ym_per_pix=ym_per_pix, xm_per_pix=xm_per_pix, isDebug=False):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
if isDebug:
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0] // 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# calculate lane departure,
    # where departure <= 0 means the car is to the left of the lane center
midlane = rightx_base - leftx_base
departure = np.around((midpoint - midlane) * xm_per_pix, decimals=2)
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 50
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows
    window_height = int(binary_warped.shape[0] // nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
if isDebug:
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
# Identify the nonzero pixels in x and y within current window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
if isDebug:
return leftx, lefty, rightx, righty, departure, out_img
else:
return leftx, lefty, rightx, righty, departure
def _lane_fit_weighted_average(l_q):
# Weights for Low Pass FIR filter
# maxlen supported is 5
# more weights for most recent samples
W = [[1.0], [0.6, 0.4], [0.5, 0.35, 0.15], [0.4, 0.3, 0.2, 0.1], [0.3, 0.25, 0.2, 0.15, 0.1]]
w = W[lane_queue_cnt - 1]
avg = 0.0
for i in range(lane_queue_cnt):
avg += w[i] * l_q[i]
return avg
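# --- Hedged sketch (illustration only, not in the original): each weight row
# used by _lane_fit_weighted_average sums to 1, so the FIR average is a proper
# convex combination of the most recent polynomial fits.
def _fir_weights_demo():
    W = [[1.0], [0.6, 0.4], [0.5, 0.35, 0.15], [0.4, 0.3, 0.2, 0.1], [0.3, 0.25, 0.2, 0.15, 0.1]]
    assert all(abs(sum(w) - 1.0) < 1e-9 for w in W)  # every row is normalized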
def _lane_fit_queue_add(ql, qr, lf, rf, tol=0.05):
# trace latest lane fitting
# new fitting parameters are checked against past ones
    # it is ignored if the difference is greater than the tolerance
global lane_queue_cnt
global bad_frame_cnt
if lane_queue_cnt < N_LP:
ql.appendleft(lf)
qr.appendleft(rf)
lane_queue_cnt += 1
else:
l_f_avg_pre = _lane_fit_weighted_average(ql)
r_f_avg_pre = _lane_fit_weighted_average(qr)
l_ok = ((l_f_avg_pre - lf) / l_f_avg_pre < tol).all()
r_ok = ((r_f_avg_pre - rf) / r_f_avg_pre < tol).all()
if l_ok and r_ok:
ql.appendleft(lf)
qr.appendleft(rf)
bad_frame_cnt = 0
else:
bad_frame_cnt += 1
if bad_frame_cnt == N_MISS:
lane_queue_cnt -= 1
bad_frame_cnt = 0
def fit_polynomial(binary_warped, ym_per_pix=ym_per_pix, xm_per_pix=xm_per_pix, isDebug=False):
# Find our lane pixels first
if lane_queue_cnt == N_LP:
if isDebug:
leftx, lefty, rightx, righty, departure, out_img = _search_around_poly(binary_warped, isDebug=isDebug)
else:
leftx, lefty, rightx, righty, departure = _search_around_poly(binary_warped, isDebug=isDebug)
else:
if isDebug:
leftx, lefty, rightx, righty, departure, out_img = _find_lane_pixels(binary_warped, isDebug=isDebug)
else:
leftx, lefty, rightx, righty, departure = _find_lane_pixels(binary_warped, isDebug=isDebug)
# Fit a second order polynomial to each lane
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# store current fits if it is a good measurement
_lane_fit_queue_add(lane_l_q, lane_r_q, left_fit, right_fit)
# get weighted average
left_fit_avg = _lane_fit_weighted_average(lane_l_q)
right_fit_avg = _lane_fit_weighted_average(lane_r_q)
# convert to meters
left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0] )
try:
left_fitx = left_fit_avg[0] * ploty ** 2 + left_fit_avg[1] * ploty + left_fit_avg[2]
right_fitx = right_fit_avg[0] * ploty ** 2 + right_fit_avg[1] * ploty + right_fit_avg[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_fitx = 1 * ploty ** 2 + 1 * ploty
right_fitx = 1 * ploty ** 2 + 1 * ploty
if isDebug:
# Colors in the left and right lane regions
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
# Plots the left and right polynomials on the lane lines
plt.figure()
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.imshow(out_img)
plt.title('Sliding Window Lane Fitting Image')
plt.savefig(os.path.join(os.getcwd(), output_folder, 'color_poly_fitting.jpg'))
return ploty, left_fitx, right_fitx, left_fit_cr, right_fit_cr, departure
def measure_curvature_real(ploty, left_fit_cr, right_fit_cr, ym_per_pix=ym_per_pix, xm_per_pix=xm_per_pix):
# the radius of curvature at the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
left_curverad = np.around(((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0]), decimals=2)
right_curverad = np.around(((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0]), decimals=2)
# average lane curvature
avg_curverad = np.around((left_curverad + right_curverad) / 2., decimals=2)
return left_curverad, right_curverad, avg_curverad
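# --- Note (hedged): the radii above apply the standard curvature formula for a
# second-order fit x = A*y**2 + B*y + C,
#   R = (1 + (2*A*y + B)**2)**1.5 / |2*A|,
# evaluated at the bottom of the image (y_eval) with coefficients in meters.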
def lane_overlay(undist, Minv, ploty, left_fitx, right_fitx, avg_curverad, depart, isDebug=False):
# Create an image to draw the lines on
    color_warp = np.zeros_like(undist)
def icp(a, b,
max_time=1
):
import cv2
import numpy
# import copy
# import pylab
import time
import sys
import sklearn.neighbors
import scipy.optimize
def res(p, src, dst):
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
[numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
[0, 0, 1]])
n = numpy.size(src, 0)
xt = numpy.ones([n, 3])
xt[:, :-1] = src
xt = (xt * T.T).A
d = numpy.zeros(numpy.shape(src))
d[:, 0] = xt[:, 0] - dst[:, 0]
d[:, 1] = xt[:, 1] - dst[:, 1]
r = numpy.sum(numpy.square(d[:, 0]) + numpy.square(d[:, 1]))
return r
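    # --- Note (hedged): res() is the sum-of-squares residual
    #   r(p) = sum_i || T(p) * x_i - y_i ||^2
    # for a planar rigid transform p = (tx, ty, theta); jac() below returns its
    # analytic gradient, using dR/dtheta for the rotation part.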
def jac(p, src, dst):
T = numpy.matrix([[numpy.cos(p[2]), -numpy.sin(p[2]), p[0]],
[numpy.sin(p[2]), numpy.cos(p[2]), p[1]],
[0, 0, 1]])
n = numpy.size(src, 0)
xt = numpy.ones([n, 3])
xt[:, :-1] = src
xt = (xt * T.T).A
d = numpy.zeros(numpy.shape(src))
d[:, 0] = xt[:, 0] - dst[:, 0]
d[:, 1] = xt[:, 1] - dst[:, 1]
        # derivative of the rotation block (reconstructed: dR/dtheta)
        dUdth_R = numpy.matrix([[-numpy.sin(p[2]), -numpy.cos(p[2])],
                                [numpy.cos(p[2]), -numpy.sin(p[2])]])
'''
* Copyright 2018 Canaan Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import math
import tensor_list_to_layer_list
import numpy as np
def hotfix_magic_1(eight_bit_mode):
if eight_bit_mode:
return 100000000.0 / 3
else:
return 100000000.0 / 3
def log_next_pow_of_2(value):
ret = 0
while value > 1 or value <= -1:
value = value / 2
ret = ret + 1
return ret, value
def pow_next_log_of_2(value, bound_shift, shift_max_shift=4):
ret = 0
shift_max = 1 << shift_max_shift
while value >= -(1 << (bound_shift - 2)) and value < (1 << (bound_shift - 2)) \
and value != 0 and ret < (shift_max - 1):
value = value * 2
ret = ret + 1
return ret, value
def signed_to_hex(value, width):
return hex(int((1 << width) + value) % (1 << width))
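# --- Hedged examples (illustration only, not in the original source):
#   log_next_pow_of_2(6.0) -> (3, 0.75), since 6 = 0.75 * 2**3
#   signed_to_hex(-1, 8)   -> '0xff'     (8-bit two's complement)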
class K210Conv:
def __init__(self, weights, input_tensor_name, depth_wise_layer, eight_bit_mode, xy_shape, xw_minmax):
self.weights = weights
self.weights_shape = self.weights.shape
self.input_shape, self.output_shape = xy_shape
xmin, xmax, wmin, wmax = xw_minmax
self.stride = 1 # layer.config['stride']
self.depth_wise_layer = depth_wise_layer #isinstance(layer, tensor_list_to_layer_list.LayerDepthwiseConvolutional)
self.eight_bit_mode = eight_bit_mode
self.x_range = xmax - xmin
self.x_bias = xmin
assert (self.x_range > 0)
self.w_range = wmax - wmin
self.w_bias = wmin
assert (self.w_range > 0)
if self.input_shape[1:3] != self.output_shape[1:3]:
# raise ValueError('conv2d {} should use padding=SAME'.format(input_tensor_name))
print('[error]', 'conv2d {} should use padding=SAME'.format(input_tensor_name))
self.input_shape = list(self.input_shape)
self.input_shape[1] = self.output_shape[1]
self.input_shape[2] = self.output_shape[2]
if self.input_shape[1] < 4:
tensor_height = self.input_shape[1]
            print('[error] feature map requires height > 4, but {} has height {}'.format(input_tensor_name, tensor_height))
self.input_shape = list(self.input_shape)
self.output_shape = list(self.output_shape)
old_input_wh = self.input_shape[1:3]
old_output_wh = self.output_shape[1:3]
self.input_shape[1:3] = [4,4]
self.output_shape[1:3] = [4,4]
            notice = 'tensor {} height-width MUST be padded from {}x{}=>{}x{} to 4x4=>4x4 in CPU before continuing.'.format(input_tensor_name, *old_input_wh, *old_output_wh)
print('[notice] '+('='*71))
print('[notice] '+ notice)
print('[notice] '+('='*71))
@staticmethod
def q(value, scale, bias):
return (value - bias) / scale
def para_mult_loads(self, weights_shape, output_shape, kernel_size):
weight_buffer_size = 2 * 9 * 4096
weights_ich = int(weights_shape[2])
weights_och = int(weights_shape[3])
weight_data_size = 1 if self.eight_bit_mode else 2
if self.depth_wise_layer:
o_ch_weights_size = int(weights_shape[0]) * int(weights_shape[1]) * weight_data_size
else:
o_ch_weights_size = int(weights_shape[0]) * int(weights_shape[1]) * int(weights_shape[2]) * weight_data_size
if int(weights_shape[0]) == 1:
o_ch_weights_size_pad = math.ceil(o_ch_weights_size / 8) * 9
else:
o_ch_weights_size_pad = o_ch_weights_size
assert (int(weights_shape[0]) == 3)
if kernel_size == 3:
load_time = math.ceil(weights_och / math.floor(4096 * 2 / weight_data_size / weights_ich))
elif kernel_size == 1:
load_time = math.ceil(weights_och / math.floor(4096 * 8 * 2 / weight_data_size / weights_ich))
else:
load_time = None
            assert False, 'unsupported kernel size (only 1 and 3 are handled)'
o_ch_num = int(output_shape[3])
o_ch_num_coef = math.floor(weight_buffer_size / o_ch_weights_size_pad)
if self.eight_bit_mode:
half_weight_buffer_size = weight_buffer_size / 2
while True:
last_ch_idx = (o_ch_num - 1) % o_ch_num_coef
last_addr_end = (last_ch_idx + 1) * o_ch_weights_size_pad
if last_addr_end < half_weight_buffer_size:
break
o_ch_num_coef = o_ch_num_coef - 1
load_time = math.ceil(o_ch_num / o_ch_num_coef)
if o_ch_num_coef <= 0:
                assert False, 'cannot fit last_addr_end into the first half of the weight buffer'
assert (load_time <= 64)
o_ch_num_coef = min(o_ch_num_coef, o_ch_num)
para_size = o_ch_num_coef * o_ch_weights_size
return load_time, para_size, o_ch_num_coef
def to_k210(self):
input_shape = self.input_shape
output_shape = self.output_shape
weights_shape = self.weights_shape
weights = self.weights
stride = self.stride
weight_data_size = 1 if self.eight_bit_mode else 2
kernel_size = int(weights_shape[0])
# img i
i_row_wid = int(input_shape[2])
i_col_high = int(input_shape[1])
coef_group = 1 if i_row_wid > 32 else (2 if i_row_wid > 16 else 4)
row_switch_addr = math.ceil(i_row_wid / 64)
channel_switch_addr = i_col_high * row_switch_addr
# conv
depth_wise_layer = 1 if self.depth_wise_layer else 0
kernel_type = {1: 0, 3: 1}[kernel_size]
pad_type = 0
load_coor = 1
first_stride = 0 if stride == 1 else 1
assert (256 > (i_col_high if first_stride == 0 else i_col_high / 2))
load_time, para_size, o_ch_num_coef = self.para_mult_loads(weights_shape, output_shape, kernel_size)
x_qmax = 255
w_qmax = (1 << (8 * weight_data_size)) - 1
bias_x, scale_x = self.x_bias, self.x_range / x_qmax
bias_w, scale_w = self.w_bias, self.w_range / w_qmax
bx_div_sx = bias_x / scale_x
bw_div_sw = bias_w / scale_w
shr_x, arg_x = pow_next_log_of_2(bw_div_sw, 24)
shr_w, arg_w = pow_next_log_of_2(bx_div_sx, 24)
arg_add = kernel_size * kernel_size * bw_div_sw * bx_div_sx
pad_value = -bx_div_sx
swsx = scale_w * scale_x
weight_q = ((weights - bias_w) / scale_w).transpose([3, 2, 0, 1])
para_start_addr = [int(round(item)) for item in np.reshape(weight_q, (np.prod(weight_q.shape),))]
return {
'swsx': swsx,
'coef_group': coef_group,
'channel_switch_addr': channel_switch_addr,
'depth_wise_layer': depth_wise_layer,
'o_ch_num_coef': o_ch_num_coef,
'i_row_wid': i_row_wid,
'i_col_high': i_col_high,
'kernel_type': kernel_type,
'pad_type': pad_type,
'first_stride': first_stride,
'pad_value': pad_value,
'load_coor': load_coor,
'load_time': load_time,
'para_size': para_size,
'para_start_addr': para_start_addr,
'row_switch_addr': row_switch_addr,
'shr_w': shr_w,
'shr_x': shr_x,
'arg_w': arg_w,
'arg_x': arg_x,
'arg_add': arg_add
}
class K210BN:
def __init__(self, mean, var, gamma, beta, epsilon, eight_bit_mode):
self.mean = mean
self.var = var
self.gamma = gamma
self.beta = beta
self.epsilon = epsilon
self.eight_bit_mode = eight_bit_mode
@staticmethod
def get_bn(scale, bias):
norm_shift, norm_mul = 8, scale * np.power(2, 8)  # pow_next_log_of_2(scale, 24)
return {'norm_mul': signed_to_hex(norm_mul, 24), 'norm_add': signed_to_hex(bias, 32), 'norm_shift': norm_shift}
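# A sketch of how get_bn's fields are read (an assumption based on this
# code, not a verified datasheet): the hardware computes
# y = ((x * norm_mul) >> norm_shift) + norm_add, so with norm_shift fixed
# at 8 the multiplier is scale * 2**8 packed into a signed 24-bit field and
# the bias into a signed 32-bit field.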
def to_k210(self, swsx=1):
sqrt_var = np.sqrt(self.var + self.epsilon)
max_scale_temp = max(swsx * self.gamma / sqrt_var)
if max_scale_temp < 1e-5:
max_scale_temp = 1.1e-5
log_scale_temp_max = 7-np.floor(np.log2(max_scale_temp))
if max_scale_temp >= np.power(2,15):
log_scale_temp_max = 0
hotfix_magic = np.power(2,log_scale_temp_max)
scale = swsx * self.gamma / sqrt_var * hotfix_magic
bias = (self.beta - self.gamma * self.mean / sqrt_var) * hotfix_magic
load_para = 1
bwsx_base_addr = [
self.get_bn(s, b)
for s, b in zip(scale.tolist(), bias.tolist())
]
return locals()
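# hotfix_magic rescales the folded BN coefficients so the largest scale
# lands roughly in [2**7, 2**8), using the available fixed-point headroom;
# the activation stage is built against the same factor (__hotfix_magic in
# K210Act.to_k210), so the scaling cancels end to end. This is a reading of
# the code, not a documented contract.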
class K210Act:
def __init__(self, act_tensor, min_y, max_y, name, eight_bit_mode):
self.name = name
self.tensor = act_tensor
self.eight_bit_mode = eight_bit_mode
self.min_y = min_y
self.max_y = max_y
@staticmethod
def leaky_relu(x):
return x if x >= 0 else 0.1 * x
@staticmethod
def leaky_relu_inverse(y):
return y if y >= 0 else 10 * y
@staticmethod
def relu_inverse(y):
return y
@staticmethod
def relu6_inverse(y):
return y
@staticmethod
def leaky_table(min_y, max_y):
range_y = max_y - min_y
y_table = [min_y + i * range_y / 15 for i in range(15)]
y_table.append(max_y)
if 0 not in y_table:
y_table.append(0)
y_table = sorted(y_table)
x_table = [K210Act.leaky_relu_inverse(it) for it in y_table]
dydx = [(y_table[i + 1] - y_table[i]) / (x_table[i + 1] - x_table[i]) for i in range(len(y_table) - 1)]
return zip(x_table, y_table, dydx)
@staticmethod
def relu_table(min_y, max_y):
range_y = max_y - min_y
y_table = [min_y + i * range_y / 15 for i in range(15)]
y_table.append(max_y)
if 0 not in y_table:
y_table.append(0)
y_table = sorted(y_table)
x_table = [K210Act.relu_inverse(it) for it in y_table]
dydx = [(y_table[i + 1] - y_table[i]) / (x_table[i + 1] - x_table[i]) for i in range(len(y_table) - 1)]
return zip(x_table, y_table, dydx)
@staticmethod
def relu6_table(min_y, max_y):
range_y = max_y - min_y
y_table = [min_y + i * range_y / 15 for i in range(15)]
y_table.append(max_y)
if 0 not in y_table:
y_table.append(0)
y_table = sorted(y_table)
x_table = [K210Act.relu6_inverse(it) for it in y_table]
dydx = [(y_table[i + 1] - y_table[i]) / (x_table[i + 1] - x_table[i]) for i in range(len(y_table) - 1)]
return zip(x_table, y_table, dydx)
@staticmethod
def linear_table(min_y, max_y):
range_y = max_y - min_y
y_table = [min_y + i * range_y / 14 for i in range(14)]
if 0 not in y_table:
y_table.append(0)
y_table.append(max_y)
y_table = sorted(y_table)
return zip(y_table, y_table, [1] * (len(y_table) - 1))
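# Each *_table helper above samples y values spread across [min_y, max_y]
# (forcing 0 into the table), maps them back to x through the activation's
# inverse, and records the slope dy/dx of each segment. table_to_act below
# turns these samples into the fixed-point piecewise-linear table that the
# K210 activation unit evaluates (truncated to 16 entries in to_k210).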
@staticmethod
def find_shift(dydx):
ret_shift = 0
while abs(dydx) < (1 << 14) and dydx > 0:
dydx = dydx * 2
ret_shift = ret_shift + 1
return ret_shift, dydx
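# find_shift doubles a positive slope until it lands in [2**14, 2**15),
# returning the shift count and the scaled slope; this lets the slope be
# stored as a fixed-point integer pair ('dxs', 'dy') in the table below.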
@staticmethod
def table_to_act(act_table, min_y, max_y, __hotfix_magic, eight_bit_mode):
def act_table_aux(x, y, dydx):
y_scale = (max_y - min_y) / 255
y_bias = min_y
x_fix = x * __hotfix_magic
y_fix = (y - y_bias) / y_scale
dydx_fix = dydx / y_scale / __hotfix_magic
yf_q = round(y_fix)
yf_err = y_fix - yf_q
xfy = x_fix - yf_err / dydx_fix
return xfy, yf_q, dydx_fix
act_table = [(0x800000000, 0, 0)] + [act_table_aux(x, y, dydx) for x, y, dydx in act_table]
def ret_aux(x, y, dydx):
dxss, dys = K210Act.find_shift(dydx)
assert (dys >= 0)
return {'x': int(round(x)), 'y': int(round(y)), 'dxs': dxss, 'dy': int(round(dys))}
return [ret_aux(x, y, dydx) for x, y, dydx in act_table]
def to_k210(self, __hotfix_magic):
act_tab = None
if self.name == 'leaky':
act_tab = list(K210Act.leaky_table(self.min_y, self.max_y))
elif self.name == 'Relu':
act_tab = list(K210Act.relu_table(self.min_y, self.max_y))
elif self.name == 'Relu6':
act_tab = list(K210Act.relu6_table(self.min_y, self.max_y))
elif self.name == 'linear':
act_tab = list(K210Act.linear_table(self.min_y, self.max_y))
else:
raise ValueError('activation {} is not supported'.format(self.name))
return {'active_addr': K210Act.table_to_act(list(act_tab), self.min_y, self.max_y, __hotfix_magic, self.eight_bit_mode)[:16]}
class K210Pool:
def __init__(self, pool_type, size, stride):
self.size = size
self.stride = stride
self.pool_type = pool_type
def to_k210(self):
if self.pool_type == 'MaxPool':
return {'pool_type': {
(2, 2): 1,
(4, 4): 3,
(2, 1): 9
}[(self.size, self.stride)]}
elif self.pool_type == 'AvgPool':
return {'pool_type': {
(2, 2): 2,
(4, 4): 4,
(2, 1): 8
}[(self.size, self.stride)]}
elif self.pool_type == 'hotfix_leftPool':
return {'pool_type': {
(2, 2): 5,
(4, 4): 7,
}[(self.size, self.stride)]}
elif self.pool_type == 'hotfix_rightPool':
return {'pool_type': 6}
else:
return None
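# The integer codes above are the hardware pooling modes inferred from this
# mapping (e.g. 1 = 2x2 max, 2 = 2x2 average); the 'hotfix_*' variants
# emulate a stride-2 convolution by picking one corner of each 2x2 block.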
class K210Layer:
def __init__(self, eight_bit_mode):
self.conv = None
self.bn = None
self.act = None
self.pool = None
self.eight_bit_mode = eight_bit_mode
@staticmethod
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
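# Example: list(K210Layer.batch([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]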
def to_k210(self, idx):
if self.pool is not None:
output_shape = list(self.conv.output_shape)
output_shape[1] = int(math.floor(self.conv.output_shape[1] / self.pool.stride))
output_shape[2] = int(math.floor(self.conv.output_shape[2] / self.pool.stride))
else:
output_shape = self.conv.output_shape
weights_shape = self.conv.weights_shape
input_shape = self.conv.input_shape
i_row_wid = int(input_shape[1])
img_data_size = 1
coef_group = 1 if i_row_wid > 32 else (2 if i_row_wid > 16 else 4)
# io
i_ch_num = int(weights_shape[2])
o_ch_num = int(output_shape[3])
# img o
o_row_wid = int(output_shape[2])
o_col_high = int(output_shape[1])
wb_group = 1 if o_row_wid > 32 else (2 if o_row_wid > 16 else 4)
wb_row_switch_addr = math.ceil(o_row_wid / 64)
wb_channel_switch_addr = o_col_high * wb_row_switch_addr
channel_byte_num = o_row_wid * o_col_high
int_en = 0
image_src_addr = None
image_dst_addr = None
dma_total_byte = o_row_wid * o_col_high * o_ch_num
dma_burst_size = 0xf
send_data_out = 0
return locals()
def make_k210_layer(sess, dataset, buffer, last_min, last_max, eight_bit_mode, range_from_batch):
cur_k210 = K210Layer(eight_bit_mode)
conv_layer = None
if isinstance(buffer[-1], tensor_list_to_layer_list.LayerConvolutional) \
or isinstance(buffer[-1], tensor_list_to_layer_list.LayerDepthwiseConvolutional):
conv_layer = buffer.pop()
conv_input_shape = list(sess.run(conv_layer.tensor_conv_x, dataset).shape)
conv_output_shape = list(sess.run(conv_layer.tensor_conv_y, dataset).shape)
# hotfix stride=2
if conv_layer.tensor_conv_y.op.get_attr('strides')[1] == 2:
conv_output_shape[1:3] = [conv_output_shape[1]*2, conv_output_shape[2]*2]
conv_input_shape[1:3] = conv_output_shape[1:3]
wmin, wmax, _ = range_from_batch(sess, conv_layer.tensor_conv_w, dataset, is_weights=True)
cur_k210.conv = K210Conv(
conv_layer.weights, conv_layer.tensor_conv_x.name,
isinstance(conv_layer, tensor_list_to_layer_list.LayerDepthwiseConvolutional),
eight_bit_mode, [conv_input_shape, conv_output_shape],
[last_min, last_max, wmin, wmax]
)
if int(conv_layer.config['batch_normalize']) == 1:
cur_k210.bn = K210BN(
conv_layer.batch_normalize_moving_mean,
conv_layer.batch_normalize_moving_variance,
conv_layer.batch_normalize_gamma,
conv_layer.batch_normalize_beta,
conv_layer.batch_normalize_epsilon,
eight_bit_mode
)
else:
bias_shape = conv_layer.bias.shape
cur_k210.bn = K210BN(0, 1, np.ones(bias_shape), conv_layer.bias, 0, eight_bit_mode)
tensor_act = conv_layer.tensor_activation
act_min_y, act_max_y, _ = range_from_batch(sess, tensor_act, dataset)
cur_k210.act = K210Act(tensor_act, act_min_y, act_max_y, conv_layer.config['activation'],
eight_bit_mode=eight_bit_mode)
if len(buffer) > 0 and isinstance(buffer[-1], tensor_list_to_layer_list.LayerPool):
pool_layer = buffer.pop()
assert (isinstance(pool_layer, tensor_list_to_layer_list.LayerPool))
pool_size = pool_layer.config['size']
pool_stride = pool_layer.config['stride']
pool_type = pool_layer.tensor_pool.op.name
# hotfix
if pool_stride == 1 and conv_layer.config['stride'] == 2:
pool_size = 2
if pool_size == 2 and pool_layer.tensor_pool.op.inputs[0].shape[3] % 2 != 0:
if pool_layer.tensor_pool.op.get_attr('padding') == b'SAME':
raise ValueError("at {} unsupported padding mode SAME for pooling with size == 2".format(pool_layer.tensor_pool.name))
cur_k210.pool = K210Pool(pool_type, pool_size, pool_stride)
# hotfix
elif conv_layer.config['stride'] == 2:
pool_size = 2
pool_stride = 2
pool_type = 'hotfix_leftPool'
cur_k210.pool = K210Pool(pool_type, pool_size, pool_stride)
return cur_k210
def make_id_layer(sess, dataset, base_tensor, min_v, max_v, eight_bit_mode, range_from_batch):
o_ch = base_tensor.shape[1]
input_tensor_shape = sess.run(base_tensor, dataset).shape
cur_k210 = K210Layer(eight_bit_mode)
cur_k210.conv = K210Conv(
weights=np.array([[[[1]]]]),
input_tensor_name='<id_layer>',
depth_wise_layer=True,
eight_bit_mode=eight_bit_mode, xy_shape=[input_tensor_shape, input_tensor_shape],
xw_minmax=[min_v, max_v, min_v, max_v]
)
cur_k210.bn = K210BN(0, 1, | np.ones(o_ch) | numpy.ones |
# -*- coding: utf-8 -*-
"""F
Created on Wed Feb 26 10:24:21 2020
@author: <NAME>
"""
import sys
import math
import random
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cbook
from matplotlib import cm
from matplotlib.colors import LightSource
from matplotlib.colors import Normalize
from scipy import signal
from scipy import stats
from sklearn.cross_decomposition import PLSRegression
#from pls import PLSRegression #own SIMPLS based alternative to sklearn
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
import warnings
from fssreg import FSSRegression
from ipls import IntervalPLSRegression
from class_mcw_pls import mcw_pls_sklearn
from osc import OSC
warnings.filterwarnings('ignore')
class InputError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class NIRData:
def __init__(self, df, y_name="value",date_name="refdate",
cval="MD",cval_param=None):
# The class takes input dataframe in the following format:
# -it needs to be a pandas dataframe
# -it can only have the following columns: spectra variables,
# measurement date, single dependent variable
# -the measurement date and dependent variable columns' names need to be specified
# -the CV method needs to be defined, it supports MD and kfold, for kfold
# the number of folds needs to be defined with cval_param
self.df0=df.copy()
self.df=df.copy()
#Column with dependent variable
self.y_name=y_name
#Date column
self.date_name=date_name
#Columns with predictors
self.freqs = [col for col in df.columns if col not in [date_name, y_name]]
#If frequency columns are not all numeric, convert them
if len([x for x in self.freqs if isinstance(x, float)])<len(self.freqs):
self.freqs=[float(freq) for freq in self.freqs]
self.df0.columns=[float(col) if col not in [date_name, y_name] else col for col in df.columns]
self.df.columns=[float(col) if col not in [date_name, y_name] else col for col in df.columns]
self.cval=cval
if cval!="MD":
if cval_param==None:
raise InputError("Missing cross validation parameter!")
self.cval_param=cval_param
#Changing the cross validation method without reinstantiating the class
def set_cval(self,cval_new):
self.cval=cval_new
################# Preprocessing techniques
# Resetting the pre-processing to the raw spectra
def reset(self):
self.df=self.df0.copy()
# Preprocessing methods (detrending, SG filter, SNV, MSC)
def to_percent(self):
f = lambda x: x/100
a=np.vectorize(f)(self.df[self.freqs].to_numpy())
self.df.loc[:, self.freqs]=a
# Convert transmittance/reflectance to absorbance
def to_absorb(self,mode="R",percent=False):
# If source is transmittance, use mode="T", if reflectance mode="R"
# Functions only valid if data is between 0-1 (percent=True)
# otherwise convert the T/R values to percent
if not percent:
self.to_percent()
if mode=="T":
f = lambda x: math.log10(1/x)
elif mode=="R":
f = lambda x: ((1-x)**2)/x
else:
raise Exception("Invalid mode, has to be either T or R")
a=np.vectorize(f)(self.df[self.freqs].to_numpy())
self.df.loc[:, self.freqs]=a
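# mode="T" applies the Beer-Lambert absorbance A = log10(1/T); mode="R"
# applies the Kubelka-Munk transform (1-R)^2 / R used for diffuse reflectance.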
# Detrending
def detrend(self, degree=1):
# Calculates a linear trend or a constant (the mean) for every
# spectral line and subtracts it
# Result is slightly different from manually implementing it!!!
x=np.array([self.freqs]).reshape(-1,)
Y=self.df[self.freqs].to_numpy()
for i in range(Y.shape[0]):
y=Y[i,:]
fit = np.polyfit(x, y, degree)
trend=np.polyval(fit, x)
y=y-trend
Y[i,:]=y
self.df.loc[:, self.freqs]=Y
# Savitzky-Golay filter
def sgfilter(self,window_length=13,polyorder=2,deriv=1):
a=signal.savgol_filter(self.df[self.freqs]
,window_length, polyorder, deriv, delta=1.0,axis=-1, mode='interp', cval=0.0)
self.df[self.freqs]=a
# SNV
def snv(self):
scaler = StandardScaler(with_mean=True, with_std=True)
scaler.fit(self.df[self.freqs].T)
self.df.loc[:, self.freqs]=scaler.transform(
self.df[self.freqs].T).T
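# SNV standardizes each spectrum (row) to zero mean and unit variance;
# StandardScaler works column-wise, so the data are transposed before and
# after so that each sample, not each wavelength, is scaled.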
# MSC
def msc(self):
ref=np.mean(self.df[self.freqs],axis=0)
X=np.matrix(self.df[self.freqs],dtype='float')
for i in range(self.df.shape[0]):
A=np.vstack([np.matrix(ref,dtype='float'),
np.ones(X.shape[1])]).T
coef, resids, rank, s = np.linalg.lstsq(
A,X[i,:].T,rcond=None)
X[i,:]=(X[i,:]-coef[1])/coef[0]
self.df[self.freqs]=X
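# MSC fits each spectrum against the mean spectrum, X[i] ~= slope*ref +
# intercept, then removes the additive and multiplicative scatter effects:
# corrected = (X[i] - intercept) / slope.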
# OSC is supervised preprocessing, so it needs CV, for which a joint modeling step is needed
# this method only crossvalidates using PLS, for other models use the built in osc_params
def osc_cv(self,nicomp_range=range(10,130,10),ncomp_range=range(1,5),epsilon = 10e-6,
max_iters = 20,model="pls",model_parameter_range=range(1,11)):
# Separating X from Y for PLS
# Needs to be converted to numpy array from pandas df
X=self.df[self.freqs].to_numpy()
# Y need to be converted to numpy array from pandas series and reshaped to (N,1) from (N,)
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
# CV based on measurement day
if self.cval=="MD":
cv = LeaveOneGroupOut()
folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
# kfold CV
elif self.cval=="kfold":
cv = KFold(n_splits=self.cval_param)
folds=list(cv.split(X))
else:
raise InputError("Invalid CV type!")
#Matrix for cv values for all the possible parameter combinations
cv_RMSE_all=np.zeros([len(folds),len(model_parameter_range),len(nicomp_range),len(ncomp_range)])
i=0
#possible internal component values for osc
for nicomp in nicomp_range:
j=0
#possible removed component values for osc
for ncomp in ncomp_range:
k=0
for train, val in folds:
# train osc
osc_obj=OSC("SWosc",nicomp,ncomp,epsilon, max_iters)
X_osc_train, W,P,mu_x=osc_obj.fit(X[train],Y[train])
# apply osc on validation set
# mean center data, alternatively the training set's mean can be used
# if you think it is a better estimate by mean="training"
X_osc_val=osc_obj.transform(X[val],mean="estimate")
l=0
#possible model parameter values for pls
for param in model_parameter_range:
#setup pls model
pls = PLSRegression(param,scale=False)
#train pls
pls.fit(X_osc_train, Y[train])
#predict with pls and calculate error
cv_RMSE_all[k,l,i,j]=metrics.mean_squared_error(
Y[val], pls.predict(X_osc_val))**0.5
l=l+1
k=k+1
j=j+1
i=i+1
# Calculate mean performance across the folds
cv_RMSE_mean=np.mean(cv_RMSE_all,axis=0)
# Take the worst-case (maximum) RMSE across model parameters for every OSC parameter combination
cv_RMSE=np.amax(cv_RMSE_mean, axis=0)
cv_RPD=np.std(self.df[self.y_name])/cv_RMSE
fig = plt.figure(figsize=(10,5))
ax = plt.axes(projection="3d")
# Cartesian indexing (x,y) transposes matrix indexing (i,j)
x, y = np.meshgrid(list(ncomp_range),list(nicomp_range))
z=cv_RPD
ls = LightSource(200, 45)
rgb = ls.shade(z, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=rgb,
linewidth=0, antialiased=False, shade=False)
plt.show()
# Best model
print("Best RMSE: ",np.amin(cv_RMSE))
print("Best RPD: ",np.std(self.df[self.y_name])/np.amin(cv_RMSE))
print("Number of internal components: ",nicomp_range[np.where(
cv_RMSE==np.amin(cv_RMSE))[0][0]])
print("Number of removed components: ",ncomp_range[np.where(
cv_RMSE==np.amin(cv_RMSE))[1][0]])
return cv_RMSE
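# Example usage (a sketch; column names and ranges are assumptions):
# nir = NIRData(df, y_name="value", date_name="refdate", cval="kfold", cval_param=5)
# cv_rmse = nir.osc_cv(nicomp_range=range(10, 40, 10), ncomp_range=range(1, 3))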
############### Plotting methods
# Plotting the current processed version of the spectra
def plot_spectra(self, processed=True, savefig=False, *args):
fig,ax = plt.subplots(figsize=(12, 8))
if processed:
# Plotting unprocessed spectra
ax.plot(self.df[self.freqs].T)
else:
# Plotting processed spectra
ax.plot(self.df0[self.freqs].T)
for arg in args:
ax.axvline(x=arg)
if savefig:
plt.savefig('plot_spectra.pdf')
# Plotting the fitted PLS model's regression weights on the spectra
def plot_pls(self):
#r=self.pls_obj.x_rotations_
r=self.pls_obj.coef_
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(self.df[self.freqs].T,c="grey",alpha=1)
ax.pcolorfast((np.min(self.freqs),np.max(self.freqs)), ax.get_ylim(),
r.T,cmap='seismic',vmin=-1,vmax=1, alpha=1)
norm = Normalize(vmin=-1, vmax=1)
scalarmappaple = cm.ScalarMappable(norm=norm,cmap='seismic')
scalarmappaple.set_array(r.T)
fig.colorbar(scalarmappaple)
# Plotting the fitted MCW-PLS model's sample weights for the individual spectra
def plot_mcw_pls(self):
a=np.diagonal(self.mcw_pls_obj.sample_weights)
cmap = plt.cm.get_cmap('seismic')
fig, ax = plt.subplots(figsize=(6, 4))
for i in range(self.df[self.freqs].shape[0]):
row=self.df[self.freqs].iloc[i]
ax.plot(row,c=cmap(a[i]),alpha=1)
scalarmappaple = cm.ScalarMappable(cmap=cmap)
scalarmappaple.set_array(a)
plt.colorbar(scalarmappaple)
r=self.mcw_pls_obj.BPLS
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(self.df[self.freqs].T,c="grey",alpha=1)
ax.pcolorfast((np.min(self.freqs),np.max(self.freqs)), ax.get_ylim(),
r.T,cmap='seismic',vmin=-1,vmax=1, alpha=1)
norm = Normalize(vmin=-1, vmax=1)
scalarmappaple = cm.ScalarMappable(norm=norm,cmap='seismic')
scalarmappaple.set_array(r.T)
fig.colorbar(scalarmappaple)
######################### Modeling methods
# Support vector regression
# For fitting a model with given parameters
def svr_pipe(self,gam,c,eps):
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
self.svr_pipe_obj = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam,C=c,epsilon=eps))])
self.svr_pipe_obj.fit(X, Y)
# For evaluating a model with given parameters
def svr_eval(self, gam,c,eps):
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
pipe = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam,C=c,epsilon=eps))])
self.eval_df=pd.DataFrame(columns = ["estimated","true"])
if self.cval=="MD":
cv = LeaveOneGroupOut()
folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
cv_RMSE=np.zeros(len(folds))
i=0
for train, val in folds:
pipe.fit(X[train], Y[train])
cv_RMSE[i]=metrics.mean_squared_error(
Y[val], pipe.predict(X[val]))**0.5
eval_new=pd.DataFrame({'estimated': pipe.predict(X[val]).reshape((-1,)),
'true': Y[val].reshape((-1,))})
self.eval_df=pd.concat([self.eval_df, eval_new], ignore_index=True)
i=i+1
y_true=self.eval_df["true"]
y_est=self.eval_df["estimated"]
print(np.std(y_true)/metrics.mean_squared_error(y_true,y_est)**0.5)
print(np.std(y_true)/np.mean(cv_RMSE))
residuals=y_true-y_est
linreg = stats.linregress(y_true, y_est)
blue='#1f77b4'
# Observed vs predicted
fig,ax = plt.subplots(figsize=(5, 5))
ax.scatter(x=y_true,y=y_est)
# Perfect prediction
ax.plot([np.min(Y), np.max(Y)], [np.min(Y), np.max(Y)], 'k--', color = 'r',label='Perfect fit')
# Model fit
ax.plot(y_true, linreg.intercept + linreg.slope*y_true, blue,label='Predicted fit')
# Text location needs to be picked manually
#ax.text(48, 56, 'R$^2$ = %0.002f' % linreg.rvalue,color=blue)
ax.text(93, 95, 'R$^2$ = %0.002f' % linreg.rvalue,color=blue)
ax.set(xlabel="Observed (%)",ylabel="Predicted (%)")
ax.legend()
# Predicted vs residuals
fig,ax = plt.subplots(figsize=(5, 5))
ax.scatter(x=y_est,y=residuals)
ax.axhline(y=np.mean(residuals), color='r', linestyle='--',label='Mean = %0.6f' % np.mean(residuals))
ax.set(xlabel="Predicted (%)",ylabel="Residuals (%)")
ax.legend()
# QQ plot
fig,ax = plt.subplots(figsize=(5, 5))
stats.probplot(residuals,plot=ax)
ax.get_lines()[0].set_markerfacecolor(blue)
ax.get_lines()[0].set_markeredgecolor(blue)
ax.get_figure().gca().set_title("")
ax.get_figure().gca().set_ylabel("Residuals (%)")
# Residual density plot with normal density
normx = np.linspace(-8,8,1000)
normy = stats.norm.pdf(normx, loc=np.mean(residuals), scale=np.std(residuals))
fig,ax = plt.subplots(figsize=(5, 5))
sns.distplot(residuals,norm_hist=True,ax=ax,color=blue)
ax.plot(normx,normy,color='r')
sns.set_style("white")
# Sorted alphas plot
# Get alphas
alphas=self.svr_pipe_obj['support vector regression'].dual_coef_
# Take abs value and sort
alphas=abs(alphas)
alphas=np.sort(alphas)
# Add zero alphas
alphas=np.vstack((np.zeros((X.shape[0]-len(alphas.T),1)),alphas.T))
fig,ax = plt.subplots(figsize=(5, 5))
ax.plot(alphas)
ax.set(xlabel="Sample ranking",ylabel="SV absolute α value")
# Method for tuning an SVM regression's free parameters based on CV
# OSC built in option, as this preprocessing is supervised so needs to be validated at the same time
def svr_cv(self,gam_start=0.001,
c_start=100,
eps_start=0.1,
optimization="grid",gridscale=5,non_improve_lim=10,verbose=False,
osc_params=None):
# Separating X from Y for PLS
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
sample_std=np.std(self.df[self.y_name])
# CV based on measurement day
if self.cval=="MD":
cv = LeaveOneGroupOut()
folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
# kfold CV
elif self.cval=="kfold":
cv = KFold(n_splits=self.cval_param)
folds=list(cv.split(X))
else:
raise InputError("Invalid CV type!")
if optimization=="none":
cv_RMSE=np.zeros(len(folds))
# Only use RBF kernels, also standardize data
pipe = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam_start,C=c_start,epsilon=eps_start))])
l=0
for train, val in folds:
pipe.fit(X[train], Y[train])
cv_RMSE[l]=metrics.mean_squared_error(
Y[val], pipe.predict(X[val]))**0.5
l=l+1
gam_best=gam_start
c_best=c_start
eps_best=eps_start
rpd_best=sample_std/np.mean(cv_RMSE)
elif optimization=="grid":
# Create a search vector from starting values for gridsearch
gam_list=np.linspace(gam_start/gridscale,gam_start*gridscale,10)
c_list=np.linspace(c_start/gridscale,c_start*gridscale,10)
eps_list=np.linspace(eps_start/gridscale,eps_start*gridscale,10)
# Create list of ndarrays from parameter search vectors,
# it will help with making the code more tidy
param_lists=[gam_list,c_list,eps_list]
param_best=np.zeros(3)
rpd_best_all=0
non_improve=0
repeat=True
while repeat:
# Array for storing CV errors
cv_RMSE_all=np.zeros([len(folds),len(gam_list),len(c_list),len(eps_list)])
# Put the CV iteration outside to save time when using OSC
i=0
for train, val in folds:
# If OSC model specified
if osc_params is not None and len(osc_params)==2:
osc=OSC(nicomp=osc_params[0],ncomp=osc_params[1])
osc.fit(X[train], Y[train])
X_train_osc=osc.X_osc
X_val_osc=osc.transform(X[val])
j=0
for gam in param_lists[0]:
k=0
for c in param_lists[1]:
l=0
for eps in param_lists[2]:
pipe = Pipeline([('scaler', StandardScaler()),
('support vector regression', SVR(kernel="rbf",gamma=gam,C=c,epsilon=eps))])
if osc_params is not None and len(osc_params)==2:
pipe.fit(X_train_osc, Y[train])
cv_RMSE_all[i,j,k,l]=metrics.mean_squared_error(
Y[val], pipe.predict(X_val_osc))**0.5
else:
pipe.fit(X[train], Y[train])
cv_RMSE_all[i,j,k,l]=metrics.mean_squared_error(
Y[val], pipe.predict(X[val]))**0.5
l=l+1
k=k+1
j=j+1
i=i+1
cv_RMSE=np.mean(cv_RMSE_all,axis=0)
# Best model
param_best[0]=param_lists[0][np.where(
cv_RMSE==np.amin(cv_RMSE))[0][0]]
param_best[1]=param_lists[1][np.where(
cv_RMSE==np.amin(cv_RMSE))[1][0]]
param_best[2]=param_lists[2][np.where(
cv_RMSE==np.amin(cv_RMSE))[2][0]]
rpd_best=sample_std/np.amin(cv_RMSE)
# Check against all time best
if rpd_best>rpd_best_all:
param_best_all = param_best.copy()
rpd_best_all=rpd_best
else:
# Increase counter if there is no improvement
non_improve=non_improve+1
if verbose==True:
print("Best RMSE: ",np.amin(cv_RMSE))
print("Best RPD: ",rpd_best)
print("Gamma: ",param_best[0])
print("C: ",param_best[1])
print("Epsilon: ",param_best[2])
repeat=False
for index,p in enumerate(param_best):
# Check if best value is within the inner 20-80% quantile range
if p<np.quantile(param_lists[index],0.2) or p>np.quantile(param_lists[index],0.8):
# If not, move the search interval based on the magnitude of the best value
scale=math.floor(math.log10(p))-1
lower=p-(10**scale)*5
upper=p+(10**scale)*5
# If best value is at the extreme of the interval expand it by a lot that way
if min(param_lists[index])==p:
lower=min(param_lists[index])/2
elif max(param_lists[index])==p:
upper=max(param_lists[index])*2
# Create new search vector
param_lists[index]=np.linspace(lower,upper,10)
# Repeat evaluation
repeat=True
# Terminate early if no improvements in 10 iterations
if non_improve>non_improve_lim:
repeat=False
print("No improvement, terminate early.")
if repeat:
print("new iteration")
# Set final values to all time best
gam_best=param_best_all[0]
c_best=param_best_all[1]
eps_best=param_best_all[2]
rpd_best=rpd_best_all
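# The loop above is an adaptive grid search: whenever a best value falls
# outside the inner 20-80% quantile range of its search vector, the vector
# is re-centred on it (or expanded when it sits on a boundary) and the
# 10x10x10 grid is re-evaluated, until non_improve_lim rounds pass without
# improving the best RPD.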
# Simulated annealing
elif optimization=="sa":
# Number of cycles
cycles = 100
# Trials per cycle
trials = 100
# Number of accepted solutions
n_accepted = 0.0
# Probability of accepting worse solution at the start
p_start = 0.3
# Probability of accepting worse solution at the end
p_end = 0.001
# Initial temperature
t_start = -1.0/math.log(p_start)
# Final temperature
t_end = -1.0/math.log(p_end)
# Use geometric temp reduction
frac = (t_end/t_start)**(1.0/(cycles-1.0))
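# With t_k = t_start * frac**k the temperature decays geometrically from
# t_start to t_end, so the acceptance probability of a mean-sized uphill
# move (dE = dE_mean) falls from p_start to p_end over the run.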
# Starting values
t=t_start
dE_mean = 0.0
gam=gam_start
c=c_start
eps=eps_start
# Calculate starting cost
cv_RMSE=np.zeros(len(folds))
pipe = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam,C=c,epsilon=eps))])
L=0
for train, val in folds:
pipe.fit(X[train], Y[train])
cv_RMSE[L]=metrics.mean_squared_error(
Y[val], pipe.predict(X[val]))**0.5
L=L+1
cost=np.mean(cv_RMSE)
rpd=sample_std/cost
print("starting RPD:",rpd)
# Best results
gam_old = gam
c_old = c
eps_old = eps
cost_old=cost
rpd_old=rpd
# All time best result
gam_best = gam
c_best = c
eps_best = eps
cost_best=cost
rpd_best = rpd
for i in range(cycles):
if verbose and i%10==0 and i>0:
print('Cycle: ', i ,' with Temperature: ', t)
print('RPD=',rpd_old,'Gamma=' ,gam_old,', C=' ,c_old,', epsilon=',eps_old)
for j in range(trials):
# Generate new trial points
gam = gam_old + (random.random()-0.5)*2/1000
c = c_old + (random.random()-0.5)*2*10
eps = eps_old + (random.random()-0.5)*2/100
# Enforce lower bounds
gam = max(gam,0.0000001)
c = max(c,0.0000001)
eps = max(eps,0)
# Calculate cost
cv_RMSE=np.zeros(len(folds))
pipe = Pipeline([('scaler', StandardScaler()),
('support vector regression',
SVR(kernel="rbf",gamma=gam,C=c,epsilon=eps))])
L=0
for train, val in folds:
pipe.fit(X[train], Y[train])
cv_RMSE[L]=metrics.mean_squared_error(
Y[val], pipe.predict(X[val]))**0.5
L=L+1
cost=np.mean(cv_RMSE)
rpd=sample_std/cost
dE = cost-cost_old
# If new cost is higher
if dE > 0:
if (i==0 and j==0): dE_mean = dE
# Generate probability of acceptance
p = math.exp(-dE/(dE_mean * t))
# Determine whether to accept worse point
if (random.random()<p):
accept = True
else:
accept = False
else:
# New cost is lower, automatically accept
accept = True
# Check if cost is lower than all time best
if cost<cost_best:
# If new best, store the parameters, cost and RPD
gam_best=gam
c_best=c
eps_best=eps
cost_best=cost
rpd_best=rpd
if accept==True:
# Update parameters, cost and RPD
gam_old = gam
c_old = c
eps_old = eps
cost_old=cost
rpd_old=rpd
# Increment number of accepted solutions
n_accepted = n_accepted + 1
# Update energy change
dE_mean = (dE_mean * (n_accepted-1) + abs(dE)) / n_accepted
# Lower the temperature for next cycle
t = frac * t
# Return the best setting found
else:
raise InputError("Invalid optimization strategy!")
return (gam_best,c_best,eps_best,rpd_best)
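# Example usage of the tuner (a sketch; starting values are assumptions):
# gam, c, eps, rpd = nir.svr_cv(gam_start=0.001, c_start=100, eps_start=0.1,
#                               optimization="grid", osc_params=None)
# nir.svr_pipe(gam, c, eps)  # refit the final pipeline on all data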
# Method for selecting nr of PLS components based on CV
def pls_cv(self,ncomp_range=range(1,21),plot=False,verbose=False,
osc_params=(10,1)):
# Separating X from Y for PLS
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
sample_std=np.std(self.df[self.y_name])
# CV based on measurement day
if self.cval=="MD":
cv = LeaveOneGroupOut()
folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
# kfold CV
elif self.cval=="kfold":
cv = KFold(n_splits=self.cval_param)
folds=list(cv.split(X))
else:
raise InputError("Invalid CV type!")
# Array for storing CV errors
cv_RMSE_all=np.zeros([len(folds),len(ncomp_range)])
i=0
for train, val in folds:
# If OSC model specified
if osc_params is not None and len(osc_params)==2:
osc=OSC(nicomp=osc_params[0],ncomp=osc_params[1])
osc.fit(X[train], Y[train])
X_train_osc=osc.X_osc
X_val_osc=osc.transform(X[val])
j=0
for ncomp in ncomp_range:
pls = PLSRegression(n_components=ncomp,scale=False)
if osc_params is not None and len(osc_params)==2:
pls.fit(X_train_osc, Y[train])
cv_RMSE_all[i,j]=metrics.mean_squared_error(
Y[val], pls.predict(X_val_osc))**0.5
else:
pls.fit(X[train], Y[train])
cv_RMSE_all[i,j]=metrics.mean_squared_error(
Y[val], pls.predict(X[val]))**0.5
j=j+1
i=i+1
# Printing and plotting CV results
cv_RMSE_ncomp=np.mean(cv_RMSE_all,axis=0)
cv_RPD_ncomp=sample_std/cv_RMSE_ncomp
if plot:
fig = plt.figure(figsize=(12,8))
plt.gca().xaxis.grid(True)
plt.xticks(ncomp_range)
plt.ylabel("RPD")
plt.xlabel("Number of components")
plt.plot(ncomp_range,cv_RPD_ncomp)
# Best model
rpd_best=max(cv_RPD_ncomp)
ncomp_best=ncomp_range[cv_RMSE_ncomp.argmin()]
if verbose:
print("Best RMSE: ",min(cv_RMSE_ncomp))
print("Best RPD: ",max(cv_RPD_ncomp))
print("Number of latent components: ",ncomp_range[cv_RMSE_ncomp.argmin()])
return (ncomp_best,rpd_best)
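# Example (a sketch; the range is an assumption):
# ncomp, rpd = nir.pls_cv(ncomp_range=range(1, 16), plot=True, osc_params=None)
# nir.pls(ncomp)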
# Method for evaluating PLS CV performance with given nr of components
def pls_eval(self,ncomp):
# Separating X from Y for PLS
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
self.eval_df=pd.DataFrame(columns = ["estimated","true"])
if self.cval=="MD":
days=self.df[self.date_name].unique()
# DataFrame for predicted and true y values
pls = PLSRegression(n_components=ncomp,scale=False)
for day in days:
val=self.df[self.date_name]==day
train=~val
pls.fit(X[train], Y[train])
# sklearn output is (N,1), has to be flattened to (N,) for pandas...
eval_new=pd.DataFrame({'estimated': pls.predict(X[val]).reshape((-1,)),
'true': Y[val].reshape((-1,))})
self.eval_df=pd.concat([self.eval_df, eval_new], ignore_index=True)
plt.scatter(x=self.eval_df["true"],y=self.eval_df["estimated"])
plt.ylabel("Estimated")
plt.xlabel("True")
plt.axhline(y=np.mean(Y)+np.std(Y), color='r', linestyle='--')
plt.axhline(y=np.mean(Y)-np.std(Y), color='r', linestyle='--')
plt.axhline(y=np.mean(Y), color='r', linestyle='-')
plt.plot([np.min(Y), np.max(Y)], [np.min(Y), np.max(Y)], 'k-', color = 'b')
# Method for fitting a PLS model with given nr of components
def pls(self,ncomp):
# Separating X from Y for PLS
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
self.pls_obj= PLSRegression(n_components=ncomp,scale=False)
self.pls_obj.fit(X, Y)
# Method for fitting a PLS model with given nr of components
def mcw_pls(self,ncomp,sig,max_iter=30, R_initial=None):
# Separating X from Y for PLS
# Needs to be converted to numpy array from pandas df
X=self.df[self.freqs].to_numpy()
# Y need to be converted to numpy array from pandas series and reshaped to (N,1) from (N,)
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
self.mcw_pls_obj=mcw_pls_sklearn(n_components=ncomp, max_iter=max_iter, R_initial=R_initial, scale_sigma2=sig)
self.mcw_pls_obj.fit(X, Y)
def mcw_pls_eval(self,ncomp,sig,max_iter=30, R_initial=None):
X=self.df[self.freqs].to_numpy()
Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
pls = mcw_pls_sklearn(n_components=ncomp, max_iter=max_iter, R_initial=R_initial, scale_sigma2=sig)
self.eval_df=pd.DataFrame(columns = ["estimated","true"])
if self.cval=="MD":
cv = LeaveOneGroupOut()
folds=list(cv.split(X=X,y=Y,groups=self.df[self.date_name]))
cv_RMSE=np.zeros(len(folds))
i=0
for train, val in folds:
pls.fit(X[train], Y[train])
cv_RMSE[i]=metrics.mean_squared_error(
Y[val], pls.predict(X[val]))**0.5
eval_new=pd.DataFrame({'estimated': pls.predict(X[val]).reshape((-1,)),
'true': Y[val].reshape((-1,))})
self.eval_df=pd.concat([self.eval_df, eval_new], ignore_index=True)
i=i+1
y_true=self.eval_df["true"]
y_est=self.eval_df["estimated"]
print(np.std(y_true)/metrics.mean_squared_error(y_true,y_est)**0.5)
print(np.std(y_true)/np.mean(cv_RMSE))
residuals=y_true-y_est
linreg = stats.linregress(y_true, y_est)
blue='#1f77b4'
# Observed vs predicted
fig,ax = plt.subplots(figsize=(5, 5))
ax.scatter(x=y_true,y=y_est)
# Perfect prediction
ax.plot([np.min(Y), np.max(Y)], [np.min(Y), np.max(Y)], 'k--', color = 'r',label='Perfect fit')
# Model fit
ax.plot(y_true, linreg.intercept + linreg.slope*y_true, blue,label='Predicted fit')
# Text location needs to be picked manually
ax.text(48, 56, 'R$^2$ = %0.002f' % linreg.rvalue,color=blue)
#ax.text(93, 95, 'R$^2$ = %0.002f' % linreg.rvalue,color=blue)
ax.set(xlabel="Observed (%)",ylabel="Predicted (%)")
ax.legend()
# Predicted vs residuals
fig,ax = plt.subplots(figsize=(5, 5))
ax.scatter(x=y_est,y=residuals)
ax.axhline(y=np.mean(residuals), color='r', linestyle='--',label='Mean = %0.6f' % np.mean(residuals))
ax.set(xlabel="Predicted (%)",ylabel="Residuals (%)")
ax.legend()
# QQ plot
fig,ax = plt.subplots(figsize=(5, 5))
stats.probplot(residuals,plot=ax)
ax.get_lines()[0].set_markerfacecolor(blue)
ax.get_lines()[0].set_markeredgecolor(blue)
ax.get_figure().gca().set_title("")
ax.get_figure().gca().set_ylabel("Residuals (%)")
# Residual density plot with normal density
normx = np.linspace(-4,4,1000)
normy = stats.norm.pdf(normx, loc= | np.mean(residuals) | numpy.mean |
# -*- coding: utf-8 -*-
"""
Created on Sat May 8 01:25:33 2021
@author: WEN
"""
import numpy as np
class PCA:
def __init__(self, n_components=None):
self.n_components = n_components
def fit(self, data):
data_T = data.transpose()
L, K = data_T.shape
self.m = (np.mean(data_T, 1)).reshape(L, 1)
C = np.cov(data_T - self.m)
self.eigen_vals, self.eigen_vecs = | np.linalg.eigh(C) | numpy.linalg.eigh |
from __future__ import absolute_import
from __future__ import division
import numbers
import numpy as np
from scipy.spatial import cKDTree
EPS = np.finfo(np.float32).eps
MIN_LAT = -90.0
MAX_LAT = 90.0 - EPS
MIN_LNG = -180.0
MAX_LNG = 180.0 - EPS
EARTH_MEAN_RADIUS = 6371.01
class SkNNI:
"""
Spherical k-nearest neighbors interpolator.
"""
def __init__(self, observations, r=EARTH_MEAN_RADIUS):
"""
Initializes a SkNNI.
:param observations: Array-like of observation triplets (lat, lng, val).
:param r: Radius of the interpolation sphere (defaults to Earth's mean
radius).
"""
# Converts the observations array-like into a NumPy array
observations = np.array(observations).astype(np.float32)
if observations.ndim != 2 or observations.shape[-1] != 3:
raise ValueError('Parameter "observations" must be a NumPy ndarray '
'of shape (-1, 3).')
if np.isnan(observations).any():
raise ValueError('Parameter "observations" contains at least one '
'NaN value.')
# Clips the latitude and longitude values to their valid range
observations[:, 0] = np.clip(observations[:, 0], MIN_LAT, MAX_LAT)
observations[:, 1] = np.clip(observations[:, 1], MIN_LNG, MAX_LNG)
if not isinstance(r, numbers.Real) or r <= 0:
raise ValueError('Parameter "r" must be a strictly positive real '
'number.')
self.observations = observations
self.r = r
# Converts degrees to radians
self.obs_lats_rad = np.radians(observations[:, 0])
self.obs_lngs_rad = np.radians(observations[:, 1])
self.obs_values = observations[:, 2]
# Converts polar coordinates to cartesian coordinates
x, y, z = self.__polar_to_cartesian(
self.obs_lats_rad, self.obs_lngs_rad, r)
# Builds a k-dimensional tree using the transformed observations
self.kd_tree = cKDTree(np.stack((x, y, z), axis=-1))
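# Neighbours are searched in 3-D cartesian space: chord distance on a sphere
# is monotonic in the central angle, so the k nearest points by Euclidean
# distance are also the k nearest by great-circle distance.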
def __call__(self, interp_coords, k=20, interp_fn=None):
"""
Runs SkNNI for the given interpolation coordinates.
:param interp_coords: Array-like of interpolation pairs (lat, lng).
:param k: Number of nearest neighbors to consider (defaults to 20).
:param interp_fn: Interpolation function (defaults to NDDNISD).
:return: Interpolation triplets (lat, lng, interp_val).
"""
# Converts the interp_coords array-like into a NumPy array
interp_coords = np.array(interp_coords).astype(np.float32)
if interp_coords.ndim != 2 or interp_coords.shape[-1] != 2:
raise ValueError('Parameter "interp_coords" must be a NumPy '
'ndarray of shape (-1, 2).')
if np.isnan(interp_coords).any():
raise ValueError('Parameter "interp_coords" contains at least one '
'NaN value.')
# Clips the latitude and longitude values to their valid range
interp_coords[:, 0] = np.clip(interp_coords[:, 0], MIN_LAT, MAX_LAT)
interp_coords[:, 1] = np.clip(interp_coords[:, 1], MIN_LNG, MAX_LNG)
if not isinstance(k, numbers.Integral) or k <= 0:
raise ValueError('Parameter k must be a strictly positive '
'integral number.')
k = min(k, len(self.observations))
if interp_fn is None:
interp_fn = SkNNI.__nddnisd_interp_fn
elif not callable(interp_fn):
raise ValueError('Parameter interp_fn must be a callable '
'function-like object.')
interp_lats_deg = interp_coords[:, 0]
interp_lngs_deg = interp_coords[:, 1]
# Converts degrees to radians
interp_lats_rad = np.radians(interp_lats_deg)
interp_lngs_rad = np.radians(interp_lngs_deg)
# Converts polar coordinates to cartesian coordinates
x, y, z = self.__polar_to_cartesian(
interp_lats_rad, interp_lngs_rad, self.r)
# Build a query for the spatial index
kd_tree_query = np.stack((x, y, z), axis=-1)
# Query the spatial index for the indices of the k nearest neighbors.
_, knn_indices = self.kd_tree.query(kd_tree_query, k=k, workers=-1)
# Get lat, lng and value information for the k nearest neighbors.
knn_lats = self.obs_lats_rad[knn_indices]
knn_lngs = self.obs_lngs_rad[knn_indices]
knn_values = self.obs_values[knn_indices]
if knn_values.ndim < 2:
knn_values = np.expand_dims(knn_values, axis=-1)
# Get interpolation point's lat and lng for each k nearest neighbor.
p_lats = np.tile(interp_lats_rad, (k, 1)).T
p_lngs = np.tile(interp_lngs_rad, (k, 1)).T
# Interpolate data values using the given interpolation function.
interp_values = interp_fn(knn_lats, knn_lngs, knn_values,
p_lats, p_lngs, self.r, k)
return np.stack(
(interp_lats_deg, interp_lngs_deg, interp_values), axis=-1)
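# Example (hypothetical observations):
# interp = SkNNI([(45.0, -73.0, 10.0), (46.0, -74.0, 12.0), (44.5, -72.5, 9.0)])
# print(interp([(45.2, -73.3)], k=2))  # -> [[lat, lng, interpolated value]]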
@staticmethod
def __polar_to_cartesian(lat, lng, r):
"""
Converts (lat, lng) coordinates in radians to cartesian coordinates.
Args:
lat: Latitude in radians.
lng: Longitude in radians.
r: Radius of the sphere.
Returns: Cartesian coordinates from the given (lat, lng) coordinates.
"""
x = r * np.cos(lng) * | np.cos(lat) | numpy.cos |
"""
The :mod:'atml.exp' module holds a set of functions to perform machine learning experiments and gather the corresponding
performances metrics.
"""
# Author: <NAME> (<EMAIL>)
# License: BSD-3
import numpy
import pandas
import openml
def get_random_split_measurement(model_instance, x, y, measure, sparse=False, cap_size=10000, test_size=0.5):
"""
Perform a random split validation experiment for a given combination of model, dataset, and evaluation measure.
Parameters
----------
model_instance: sklearn.predictor
A model instance with the sklearn.predictor template, it should have a fit() method for model training, and
a predict_proba() method to predict probability vectors on test data.
x: numpy.ndarray
The data matrix with shape (n samples, d dimensions)
y: numpy.ndarray
The label vector with shape (n samples, 1)
measure: atml.Measure
A evaluation measure selected from the atml.measure module.
sparse: boolean
To indicate whether to only use a subset of the dataset to perform the experiments.
cap_size: integer
In the case sparse=True, cap_size specifies the maximum size of the dataset to run the experiments.
test_size: float
The proportion of the dataset that is used as the testing set (validation set).
Returns
----------
measurement: float
The performance measurement on the testing set (validation set).
"""
# x : shape(n, m)
# y : shape(n, 1)
# y_vector: shape(n, k)
n = numpy.shape(x)[0]
_, y = numpy.unique(y, return_inverse=True)
k = len(numpy.unique(y))
shuffle_idx = numpy.random.permutation(numpy.arange(0, n))
x = x[shuffle_idx, :]
y = y[shuffle_idx]
y_vector = numpy.zeros((n, k))
for i in range(0, k):
y_vector[:, i] = (y == i)
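# y_vector is a one-hot matrix: numpy.unique's return_inverse relabels the
# classes 0..k-1 and column i flags membership of class i, which makes the
# per-class frequency below a simple column mean.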
if sparse & (n > cap_size):
class_count = numpy.ceil( | numpy.mean(y_vector, axis=0) | numpy.mean |
import os
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from nnAudio.Spectrogram import STFT
from src.dataset.utils.spectgram import calc_istft, calc_stft, get_librosa_params
from src.dataset.utils.waveform_preprocessings import preprocess_strain
def visualize_data(sample_data: np.ndarray, title: Optional[str] = None) -> None:
for i in range(sample_data.shape[0]):
plt.plot(np.arange(sample_data.shape[1]), sample_data[i], label=str(i))
plt.legend()
plt.grid()
if title is not None:
plt.title(title)
def vis_from_df(data_id: str, df: pd.DataFrame, is_train: bool = True) -> None:
target = df[df.id == data_id].to_dict(orient="list")
sample_data = np.load(target["path"][0])
title = f'id:{target["id"][0]}'
if is_train:
title += f', class:{target["target"][0]}'
visualize_data(sample_data=sample_data, title=title)
def vis_psd(
psds: List[np.ndarray], freqs: np.ndarray, labels: Optional[List[str]] = None
):
plt.figure(figsize=(8, 5))
# scale x and y axes
plt.xscale("log", base=2)
plt.yscale("log", base=10)
_colors = ["red", "green", "blue", "black", "grey", "magenta"]
if labels is None:
labels = list(range(0, len(psds)))
# plot nowindow, tukey, welch together
for i, psd in enumerate(psds):
plt.plot(
freqs,
psd,
_colors[i],
label=labels[i],
alpha=0.8,
linewidth=0.5,
)
# plot 1/f^2
# give it the right starting scale to fit with the rest of the plots
# don't include zero frequency
inverse_square = 1.0 / freqs[1:] ** 2
# inverse starts at 1 to take out 1/0
scale_index = 500 # chosen by eye to fit the plot
scale = psds[0][scale_index] / inverse_square[scale_index]
plt.plot(
freqs[1:],
inverse_square * scale,
"red",
label=r"$1 / f^2$",
alpha=0.8,
linewidth=1,
)
# plt.axis([20, 512, 1e-48, 1e-41])
plt.axis([20, 2048, 1e-48, 1e-44])
plt.ylabel("Sn(t)")
plt.xlabel("Freq (Hz)")
plt.legend(loc="upper center")
# plt.title("LIGO PSD data near " + eventname + " at H1")
plt.show()
def vis_stft(
strain: np.ndarray,
stft_params: Dict[str, Any],
title_1: str,
lib: str = "librosa",
is_db: bool = False,
y_axis: str = "hz",
amin: float = 1.0e-25,
top_db: int = 200,
ref: float = 1.0,
target: str = "lngDeg_diff_center",
target_gt: Optional[str] = None,
time_range: Optional[Tuple[int, int]] = None,
save_path: Optional[str] = None,
dtype: np.dtype = np.float64,
):
if lib == "librosa":
wave_transform = partial(
librosa.stft, **get_librosa_params(stft_params=stft_params)
)
elif lib == "nnAudio":
wave_transform = STFT(**stft_params)
else:
raise NotImplementedError(f"unexpected value for lib: {lib}")
D_abs, D_theta = calc_stft(
x=strain,
wave_transform=wave_transform,
stft_params=stft_params,
lib=lib,
is_db=is_db,
amin=amin,
top_db=top_db,
ref=ref,
dtype=dtype,
)
win_length = stft_params["win_length"]
sr = stft_params["sr"]
hop_length = stft_params["hop_length"]
# n_fft = stft_params["n_fft"]
# === PLOT ===
nrows = 4
ncols = 2
fig, ax = plt.subplots(
nrows=nrows,
ncols=ncols,
figsize=(12, 6),
sharey=False,
sharex=False,
)
fig.suptitle("Log Frequency Spectrogram", fontsize=16)
# fig.delaxes(ax[1, 2])
title_text = f"W:{win_length}"
if is_db:
title_text = "Axis:dB, " + title_text
else:
title_text = "Axis:Lin, " + title_text
ax[0][0].set_title(target + ", " + title_text, fontsize=10)
ax[0][1].set_title(title_1, fontsize=10)
time = np.arange(0, strain.shape[0] / sr, 1.0 / sr)
ax[0][0].plot(time, strain, label=f"{target}")
ax[0][0].legend(loc="upper right")
ax[0][1].legend(loc="upper right")
ax[0][0].set_xlim(time.min(), time.max())
for nrow, mat in enumerate([D_abs, | np.cos(D_theta) | numpy.cos |
# Safe controllers which do a black box optimization incorporating the constraint costs.
import copy
import numpy as np
import scipy.stats as stats
import torch
device = torch.device( "cuda" if torch.cuda.is_available() else "cpu")
class safeARC(object):
def __init__(self, env, models, critic, termination_function):
###########
# params
###########
self.horizon = 5 # Hyperparam search [5,8]
self.reward_horizon = 8
self.N = 100 # Hyperparam search [100,400]
self.models = models
self.env = copy.deepcopy(env)
self.critic = critic
self.mixture_coefficient = 0.05
self.max_iters = 5
self.actor_traj = int(self.mixture_coefficient*self.N)
self.num_elites = 20
self.obs_dim = self.env.observation_space.shape[0]
self.action_dim = self.env.action_space.shape[0]
self.sol_dim = self.env.action_space.shape[0] * self.horizon
self.ub = np.repeat(self.env.action_space.high,self.horizon,axis=0)
self.lb = np.repeat(self.env.action_space.low,self.horizon,axis=0)
self.alpha = 0.1
self.mean = np.zeros((self.sol_dim,))
self.termination_function=termination_function
self.particles = 4
self.safety_threshold = 0.2
self.minimal_elites = 10
self.kappa = 1
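# get_action below is a CEM/MPPI-style shooting method: sample N action
# sequences from a truncated normal (plus a few actor-suggested rollouts),
# propagate them through the model ensemble with several particles, score
# them by predicted return plus a terminal Q estimate, and filter or
# re-weight trajectories whose predicted safety cost exceeds the threshold.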
def reset(self):
self.mean = np.zeros((self.sol_dim,))
def get_action(self, curr_state,env=None):
actor_state = np.array([np.concatenate(([0],curr_state.copy()),axis=0)] * (self.actor_traj))# Size [actor_traj,state_dim]
curr_state = np.array([np.concatenate(([0],curr_state.copy()),axis=0)] * ((self.N+self.actor_traj)*self.particles))
curr_state = np.expand_dims(curr_state, axis=0)
curr_state = np.repeat(
curr_state,
self.models.model.network_size,
0) # [numEnsemble, N+actor_traj,state_dim]
# initial mean and var of the sampling normal dist
self.mean[:-self.action_dim] = self.mean[self.action_dim:]
self.mean[-self.action_dim:] = self.mean[-2*self.action_dim:-self.action_dim]
mean=self.mean
var = np.tile(np.square(self.env.action_space.high[0]-self.env.action_space.low[0]) / 16, [self.sol_dim]) #/16
# Add trajectories using actions suggested by actors
actor_trajectories = np.zeros((self.actor_traj,self.sol_dim))
actor_state = torch.FloatTensor(actor_state).to(device)
actor_state_m = actor_state[0,:].reshape(1,-1)
actor_state_m2 = actor_state[1,:].reshape(1,-1)
for h in range(self.horizon):
actor_actions_m = self.critic.ac.act_batch(actor_state_m.reshape(1,-1)[:,1:],deterministic=True)
actor_state_m = self.models.get_forward_prediction_random_ensemble_t(actor_state_m[:,1:],actor_actions_m)
actor_trajectories[0,h*self.action_dim:(h+1)*self.action_dim]=actor_actions_m.detach().cpu().numpy()
actor_actions = self.critic.ac.act_batch(actor_state_m2[:,1:])
actor_state_m2 = self.models.get_forward_prediction_random_ensemble_t(actor_state_m2[:,1:],actor_actions)
actor_trajectories[1:,h*self.action_dim:(h+1)*self.action_dim]=actor_actions.detach().cpu().numpy()
X = stats.truncnorm(-2, 2, loc=np.zeros_like(mean), scale= np.ones_like(mean))
t = 0
while (t < self.max_iters):
lb_dist, ub_dist = mean - self.lb, self.ub - mean
constrained_var = np.minimum(np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)
action_traj = (X.rvs(size=(self.N, self.sol_dim)) * np.sqrt(constrained_var) + mean).astype(np.float32)
action_traj = np.concatenate((action_traj,actor_trajectories),axis=0)
# Multiple particles go through the same action sequence
action_traj = np.repeat(action_traj,self.particles,axis=0)
# actions clipped between -1 and 1
action_traj = np.clip(action_traj, -1, 1)
states = torch.from_numpy(np.expand_dims(curr_state.copy(),axis=0)).float().to(device)
actions = np.repeat(np.expand_dims(action_traj,axis=0),self.models.model.network_size,axis=0)
actions = torch.FloatTensor(actions).to(device)
for h in range(self.horizon):
states_h = states[h,:,:,1:]
next_states = self.models.get_forward_prediction_t(states_h, actions[:,:, h * self.action_dim:(h + 1) * self.action_dim])
states = torch.cat((states,next_states.unsqueeze(0)), axis=0)
states = states.cpu().detach().numpy()
done = np.zeros((states.shape[1],states.shape[2],1)) # Shape [Ensembles, (actor_traj+N)*particles,1]
# Set the reward of terminated states to zero
for h in range(1,self.horizon+1):
for ens in range(states.shape[1]):
done[ens,:,:] = np.logical_or(done[ens,:,:],self.termination_function(None,None,states[h,ens,:,1:]))
not_done = 1-done[ens,:,:]
states[h,ens,:,0]*=not_done.astype(np.float32).reshape(-1)
# Find average cost of each trajectory
returns = np.zeros((self.N+self.actor_traj,))
safety_costs = np.zeros((self.N+self.actor_traj,))
actions_H = torch.from_numpy(action_traj[:, (self.reward_horizon - 1) * self.action_dim:(
self.reward_horizon) * self.action_dim].reshape((self.N+self.actor_traj)*self.particles, -1)).float().to(device)
actions_H = actions_H.repeat(self.models.model.network_size, 1)
# actions_H = actions_H.repeat_interleave(repeats=states.shape[1],dim=0)
states_H = torch.from_numpy(
states[self.reward_horizon-1, :, :, 1:].reshape((self.N+self.actor_traj)*self.particles*states.shape[1], -1)).float().to(device)
terminal_Q_rewards = self.critic.ac.q1(
states_H, actions_H).cpu().detach().numpy()
terminal_Q_rewards = terminal_Q_rewards.reshape(states.shape[1],-1)
states_flatten = states[:,:,:,1:].reshape(-1,self.obs_dim)
all_safety_costs = np.zeros((states_flatten.shape[0],))
all_safety_costs = env.get_observation_cost(states_flatten)
all_safety_costs = all_safety_costs.reshape(states.shape[0],states.shape[1],states.shape[2],1)
for ensemble in self.models.model.elite_model_idxes:
done[ensemble,:,:] = np.logical_or(done[ensemble,:,:],self.termination_function(None,None,states[self.horizon-1,ensemble,:,1:]))
not_done = 1-done[ensemble,:,:]
q_rews = terminal_Q_rewards[ensemble,:]*not_done.reshape(-1)
n = np.arange(0,self.N+self.actor_traj,1).astype(int)
for particle in range(self.particles):
returns[n]+= np.sum(states[:self.reward_horizon, ensemble, n*self.particles+particle, 0],axis=0) + q_rews.reshape(-1)[n*self.particles+particle]
safety_costs[n] = np.maximum(safety_costs,np.sum(all_safety_costs[0:self.horizon, ensemble, n*self.particles+particle, 0],axis=0))
returns /= (states.shape[1]*self.particles)
costs = -returns
if((safety_costs<self.safety_threshold).sum()<self.minimal_elites):
safety_rewards = -safety_costs
max_safety_reward = np.max(safety_rewards)
score = np.exp(self.kappa*(safety_rewards-max_safety_reward))
indices = np.argsort(safety_costs)
mean = np.sum(action_traj[np.arange(0,self.N+self.actor_traj,1).astype(int)*self.particles,:]*score.reshape(-1,1),axis=0)/(np.sum(score)+1e-10)
new_var = np.average((action_traj[np.arange(0,self.N+self.actor_traj,1).astype(int)*self.particles,:]-mean)**2, weights=score.reshape(-1),axis=0)
else:
costs = (safety_costs<self.safety_threshold)*costs + (safety_costs>=self.safety_threshold)*1e4
indices = np.arange(costs.shape[0])
indices = | np.array([idx for idx in indices if costs[idx]<1e3]) | numpy.array |
'''
Created on Nov. 11, 2019
Mosaik interface for the Distribution State Estimation.
@file simulator_dse.py
@author <NAME>
@date 2019.11.11
@version 0.1
@company University of Alberta - Computing Science
'''
import mosaik_api
import numpy as np
import pandas as pd
import os
import sys
import csv
from ast import literal_eval
import scipy.io as spio
import math
from pathlib import Path
META = {
'models': {
'Estimator': {
'public': True,
'params': ['idt', 'ymat_file', 'devs_file', 'acc_period', 'max_iter', 'threshold', 'baseS', 'baseV', 'baseNode', 'basePF', 'se_period', 'se_result', 'pseudo_loads', 'verbose'],
'attrs': ['v', 't'],
},
},
}
class DSESim(mosaik_api.Simulator):
def __init__(self):
super().__init__(META)
self.entities = {}
self.next = {}
self.instances = {}
self.devParams = {}
self.data = {}
def init(self, sid, eid_prefix=None, step_size=1, verbose=0):
if eid_prefix is not None:
self.eid_prefix = eid_prefix
self.sid = sid
self.step_size = step_size
self.verbose = verbose
self.cktState = {}
self.MsgCount = 0
return self.meta
def create(self, num, model, idt, ymat_file, devs_file, acc_period, max_iter, threshold, baseS, baseV, baseNode, basePF, se_period, pseudo_loads, se_result):
if (self.verbose > 0): print('simulator_dse::create', num, model, idt)
eid = '%s%s' % (self.eid_prefix, idt)
self.entities[eid] = {}
self.entities[eid]['ymat_file'] = ymat_file
self.entities[eid]['devs_file'] = devs_file
self.entities[eid]['acc_period'] = acc_period
self.entities[eid]['max_iter'] = max_iter
self.entities[eid]['threshold'] = threshold
self.entities[eid]['type'] = model
self.entities[eid]['baseS'] = baseS
self.entities[eid]['baseV'] = baseV
self.entities[eid]['baseI'] = baseS/baseV
self.entities[eid]['baseY'] = baseS/np.power(baseV,2)
self.entities[eid]['baseNode'] = baseNode
self.entities[eid]['basePF'] = basePF
self.entities[eid]['se_period'] = se_period
self.entities[eid]['pseudo_loads'] = pseudo_loads
self.entities[eid]['se_result'] = se_result
self.entities[eid]['vecZ'] = {}
self.entities[eid]['nodes'] = 0
self.entities[eid]['df_devs'] = pd.DataFrame({})
''' read ymat_file and get number of nodes '''
self.entities[eid]['ymat_data'] = np.load(ymat_file)
self.entities[eid]['nodes'] = len(self.entities[eid]['ymat_data'])
self.entities[eid]['ymat_data'] = self.entities[eid]['ymat_data'] / self.entities[eid]['baseY']
if (self.verbose > 0): print('DSESim::create Nodes YMat:', self.entities[eid]['nodes'])
''' get device list '''
self.entities[eid]['df_devs'] = pd.read_csv(devs_file, delimiter = ',', index_col = 'idn')
self.entities[eid]['df_devs']= pd.concat([self.entities[eid]['df_devs'], pd.DataFrame({'SPA':[], # true power phase A
'SQA':[], # reactive power phase A
'SPB':[], # true power phase B
'SQB':[], # reactive power phase B
'SPC':[], # true power phase C
'SQC':[], # reactive power phase C
'VMA':[], # voltage magnitude phase A
'VAA':[], # voltage angle phase A
'VMB':[], # voltage magnitude phase B
'VAB':[], # voltage angle phase B
'VMC':[], # voltage magnitude phase C
'VAC':[], # voltage angle phase C
'IMA':[], # current magnitude phase A
'IAA':[], # current angle phase A
'IMB':[], # current magnitude phase B
'IAB':[], # current angle phase B
'IMC':[], # current magnitude phase C
'IAC':[], # current angle phase C
'TS':[]})] # last time stamp
, sort=False)
if (self.verbose > 1):
print('DSESim::create Entities:')
print(self.entities[eid]['df_devs'])
''' create vecZ and vecZType '''
df_devs = self.entities[eid]['df_devs']
nr_phasors = (df_devs[df_devs['type'] == 'phasor']).shape[0]
self.entities[eid]['vecZ'] = np.zeros((int(self.entities[eid]['nodes']) - 3) + # P values
(int(self.entities[eid]['nodes']) - 3) + # Q values
(nr_phasors*3 ) + # Voltage magnitude
(nr_phasors*3 - 1), # Voltage angles
np.float64)
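# vecZ is the WLS measurement vector: (nodes-3) real and (nodes-3) reactive
# power injections (the three slack-bus phases are excluded), plus three
# voltage magnitudes per phasor and three angles per phasor minus one
# reference angle.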
entities = []
self.data[eid] = {}
self.data[eid]['v'] = 0
self.data[eid]['t'] = 0
entities.append({'eid': eid, 'type': model})
return entities
def step(self, time, inputs):
if (self.verbose > 5): print('simulator_dse::step INPUT', time, inputs)
next_step = time + self.step_size
''' prepare data to be used in get_data '''
#self.data = {}
''' prepare data to be used in get_data and calculate system state '''
''' for each instance '''
for dse_eid, attrs in inputs.items():
attr_v = attrs['v']
df_devs = self.entities[dse_eid]['df_devs']
''' for each smartmeter/phasor '''
for dev_instance, param in attr_v.items():
if (param != None and param != 'null' and param != "None"):
self.MsgCount += 1
''' change to dict because NS-3 need to transmit string '''
if isinstance(param, str):
param = literal_eval(param)
dev_idn = (param['IDT']).split("_")[1]
dev_type = param['TYPE']
dev_name = dev_instance.split(".")[1]
''' store values already per-unit '''
if (self.verbose > 1):
print('simulator_dse::step INPUT PROCESSED: ',
'TIME:', time,
'TIME_Sent:', param['TS'],
'IDN:', dev_idn,
'TYPE:', dev_type,
'NAME:', dev_name,
'PARMS:', param)
for dev_param_key in param.keys():
if dev_param_key == 'VA' and dev_type == 'Phasor':
df_devs.at[int(dev_idn), 'VMA'] = param['VA'][0] / self.entities[dse_eid]['baseV']
df_devs.at[int(dev_idn), 'VAA'] = param['VA'][1]
elif dev_param_key == 'VB' and dev_type == 'Phasor':
df_devs.at[int(dev_idn), 'VMB'] = param['VB'][0] / self.entities[dse_eid]['baseV']
df_devs.at[int(dev_idn), 'VAB'] = param['VB'][1]
elif dev_param_key == 'VC' and dev_type == 'Phasor':
df_devs.at[int(dev_idn), 'VMC'] = param['VC'][0] / self.entities[dse_eid]['baseV']
df_devs.at[int(dev_idn), 'VAC'] = param['VC'][1]
elif dev_param_key == 'IA':
df_devs.at[int(dev_idn), 'IMA'] = param['IA'][0] / self.entities[dse_eid]['baseI']
df_devs.at[int(dev_idn), 'IAA'] = param['IA'][1]
elif dev_param_key == 'IB':
df_devs.at[int(dev_idn), 'IMB'] = param['IB'][0] / self.entities[dse_eid]['baseI']
df_devs.at[int(dev_idn), 'IAB'] = param['IB'][1]
elif dev_param_key == 'IC':
df_devs.at[int(dev_idn), 'IMC'] = param['IC'][0] / self.entities[dse_eid]['baseI']
df_devs.at[int(dev_idn), 'IAC'] = param['IC'][1]
elif dev_param_key == 'SPA':
df_devs.at[int(dev_idn), 'SPA'] = param['SPA'] / (self.entities[dse_eid]['baseS']*1000)
df_devs.at[int(dev_idn), 'SQA'] = df_devs.at[int(dev_idn), 'SPA'] * np.tan(np.arccos(self.entities[dse_eid]['basePF'] ))
elif dev_param_key == 'SPB':
df_devs.at[int(dev_idn), 'SPB'] = param['SPB'] / (self.entities[dse_eid]['baseS']*1000)
df_devs.at[int(dev_idn), 'SQB'] = df_devs.at[int(dev_idn), 'SPB'] * np.tan(np.arccos(self.entities[dse_eid]['basePF'] ))
elif dev_param_key == 'SPC':
df_devs.at[int(dev_idn), 'SPC'] = param['SPC'] / (self.entities[dse_eid]['baseS']*1000)
df_devs.at[int(dev_idn), 'SQC'] = df_devs.at[int(dev_idn), 'SPC'] * np.tan(np.arccos(self.entities[dse_eid]['basePF'] ))
elif dev_param_key == 'TS':
df_devs.at[int(dev_idn), 'TS'] = param['TS']
elif ((dev_param_key == 'VA') or (dev_param_key == 'VB') or (dev_param_key == 'VC')) and (dev_type != 'Phasor'):
pass
elif (dev_param_key == 'IDT') or (dev_param_key == 'TYPE'):
pass
else:
raise Exception('dev_param_key value unknown:', dev_param_key, "Device:", dev_name)
if (0 == time % self.entities[dse_eid]['acc_period']):
self.data[dse_eid]['v'] = self.MsgCount
self.data[dse_eid]['t'] = time
self.MsgCount = 0
#(self.entities[dse_eid]['vecZ'], _) = self.createZVectors(dse_eid, len(self.entities[dse_eid]['vecZ']))
# se_period = 1000
# if next_step == 500:
# print("Check the phasors!")
if time > 0 and time % self.entities[dse_eid]['se_period'] == 0:
# if time % se_period == 0:
z, ztype, error_cov = self.get_measurements(df_devs, time)
stop_counter = 0
while stop_counter < 5:
v_wls, iter_number = self.state_estimation(self.entities[dse_eid]['ymat_data'], z, ztype, error_cov,
self.entities[dse_eid]['max_iter'], self.entities[dse_eid]['threshold'])
if 1 < iter_number < 10:
stop_counter = 5
else:
stop_counter += 1
# array_name = 'v_wls_{}'.format(int(time / self.entities[dse_eid]['se_period']))
array_name = 'v_wls'
# if Path('C:/OpenDSS/DSSE33DetailedMultiPhase/wls_results.mat').is_file():
if time / self.entities[dse_eid]['se_period'] > 1:
mat = spio.loadmat(self.entities[dse_eid]['se_result'], squeeze_me=True)
mat[array_name] = np.vstack((mat[array_name], v_wls))
spio.savemat(self.entities[dse_eid]['se_result'], mat)
else:
spio.savemat(self.entities[dse_eid]['se_result'], {array_name: v_wls})
return next_step
def state_estimation(self, ybus, z, ztype, err_cov, iter_max, threshold):
ztype = np.array(ztype)
n = len(ybus) # number of single phase nodes
g = np.real(ybus) # real part of the admittance matrix
b = np.imag(ybus) # imaginary part of the admittance matrix
x = np.concatenate(
([-2 * math.pi / 3, -4 * math.pi / 3],
np.tile([0, -2 * math.pi / 3, -4 * math.pi / 3], math.floor(n / 3) - 1),
np.ones(n) * (1 + .000001 * | np.random.randn(n) | numpy.random.randn |
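# Illustrative sketch of the weighted-least-squares update that state_estimation()
# iterates; the Jacobian H, covariance R and measurements z below are assumed
# values, not part of the simulator:
# delta_x = (H^T R^-1 H)^-1 H^T R^-1 (z - h(x))
def wls_step_sketch():
    H = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])  # assumed measurement Jacobian
    R_inv = np.linalg.inv(np.diag([1e-4, 1e-4, 1e-4]))  # inverse error covariance
    z = np.array([1.02, 2.00, 0.98])                    # assumed measurements
    x = np.zeros(2)                                     # flat start
    gain = H.T @ R_inv @ H                              # gain matrix G = H^T R^-1 H
    return x + np.linalg.solve(gain, H.T @ R_inv @ (z - H @ x))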
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
import pybullet as p
from . import kuka
import random
import pybullet_data
from pkg_resources import parse_version
maxSteps = 1000
RENDER_HEIGHT = 720
RENDER_WIDTH = 960
class KukaCamGymEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self,
urdfRoot=pybullet_data.getDataPath(),
actionRepeat=1,
isEnableSelfCollision=True,
renders=False,
isDiscrete=False):
self._timeStep = 1./240.
self._urdfRoot = urdfRoot
self._actionRepeat = actionRepeat
self._isEnableSelfCollision = isEnableSelfCollision
self._observation = []
self._envStepCounter = 0
self._renders = renders
self._width = 341
self._height = 256
self._isDiscrete=isDiscrete
self.terminated = 0
self._p = p
if self._renders:
cid = p.connect(p.SHARED_MEMORY)
if (cid<0):
p.connect(p.GUI)
p.resetDebugVisualizerCamera(1.3,180,-41,[0.52,-0.2,-0.33])
else:
p.connect(p.DIRECT)
#timinglog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "kukaTimings.json")
self._seed()
self.reset()
observationDim = len(self.getExtendedObservation())
#print("observationDim")
#print(observationDim)
observation_high = np.array([np.finfo(np.float32).max] * observationDim)
if (self._isDiscrete):
self.action_space = spaces.Discrete(7)
else:
action_dim = 3
self._action_bound = 1
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(low=0, high=255, shape=(self._height, self._width, 4))
self.viewer = None
def _reset(self):
self.terminated = 0
p.resetSimulation()
p.setPhysicsEngineParameter(numSolverIterations=150)
p.setTimeStep(self._timeStep)
p.loadURDF(os.path.join(self._urdfRoot,"plane.urdf"),[0,0,-1])
p.loadURDF(os.path.join(self._urdfRoot,"table/table.urdf"), 0.5000000,0.00000,-.820000,0.000000,0.000000,0.0,1.0)
xpos = 0.5 +0.2*random.random()
ypos = 0 +0.25*random.random()
ang = 3.1415925438*random.random()
orn = p.getQuaternionFromEuler([0,0,ang])
self.blockUid =p.loadURDF(os.path.join(self._urdfRoot,"block.urdf"), xpos,ypos,-0.1,orn[0],orn[1],orn[2],orn[3])
p.setGravity(0,0,-10)
self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot, timeStep=self._timeStep)
self._envStepCounter = 0
p.stepSimulation()
self._observation = self.getExtendedObservation()
return np.array(self._observation)
def __del__(self):
p.disconnect()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def getExtendedObservation(self):
#camEyePos = [0.03,0.236,0.54]
#distance = 1.06
#pitch=-56
#yaw = 258
#roll=0
#upAxisIndex = 2
#camInfo = p.getDebugVisualizerCamera()
#print("width,height")
#print(camInfo[0])
#print(camInfo[1])
#print("viewMatrix")
#print(camInfo[2])
#print("projectionMatrix")
#print(camInfo[3])
#viewMat = camInfo[2]
#viewMat = p.computeViewMatrixFromYawPitchRoll(camEyePos,distance,yaw, pitch,roll,upAxisIndex)
viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0, -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0, 0.0, 0.5504802465438843, 0.8348482847213745, 0.0, 0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
#projMatrix = camInfo[3]#[0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
projMatrix = [0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
img_arr = p.getCameraImage(width=self._width,height=self._height,viewMatrix=viewMat,projectionMatrix=projMatrix)
rgb=img_arr[2]
np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
self._observation = np_img_arr
return self._observation
def _step(self, action):
if (self._isDiscrete):
dv = 0.01
dx = [0,-dv,dv,0,0,0,0][action]
dy = [0,0,0,-dv,dv,0,0][action]
da = [0,0,0,0,0,-0.1,0.1][action]
f = 0.3
realAction = [dx,dy,-0.002,da,f]
else:
dv = 0.01
dx = action[0] * dv
dy = action[1] * dv
da = action[2] * 0.1
f = 0.3
realAction = [dx,dy,-0.002,da,f]
return self.step2( realAction)
def step2(self, action):
for i in range(self._actionRepeat):
self._kuka.applyAction(action)
p.stepSimulation()
if self._termination():
break
#self._observation = self.getExtendedObservation()
self._envStepCounter += 1
self._observation = self.getExtendedObservation()
if self._renders:
time.sleep(self._timeStep)
#print("self._envStepCounter")
#print(self._envStepCounter)
done = self._termination()
reward = self._reward()
#print("len=%r" % len(self._observation))
return | np.array(self._observation) | numpy.array |
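# Minimal random-action rollout sketch; assumes step2() returns the usual
# (observation, reward, done, info) gym tuple and that pybullet assets resolve
# through pybullet_data:
def kuka_rollout_sketch():
    env = KukaCamGymEnv(renders=False, isDiscrete=True)
    obs = env._reset()
    for _ in range(5):
        obs, reward, done, info = env._step(env.action_space.sample())
        if done:
            break
    return obs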
from hydroDL.post import axplot, figplot
from hydroDL.new import fun
from hydroDL.app import waterQuality
import importlib
import matplotlib.pyplot as plt
import numpy as np
import random
import scipy
from scipy.special import gamma, loggamma
import torch
import torch.nn.functional as F
from torch import exp, lgamma
# fake data
nq = 10
rho = 365
nt = 1000
nbatch = 30
p = np.random.random([nq, nt])
aAry = np.exp((np.random.random(nq)-0.5)*2)
bAry = np.exp((np.random.random(nq)-0.5)*2)
# numpy
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
qMat = np.empty((10, 365))
for k in range(10):
a = aAry[k]
b = bAry[k]
x = ( | np.arange(365) | numpy.arange |
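# The loop above evaluates a gamma-shaped unit response per series; one common
# parameterisation (assumed here) is f(x) = b**a * x**(a-1) * exp(-b*x) / Gamma(a):
def gamma_response_sketch(a, b, x=np.arange(1, 366)):
    return b ** a * x ** (a - 1.0) * np.exp(-b * x) / gamma(a)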
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi time series forecasting problem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import timeseries_data_generator
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
class TimeseriesProblem(problem.Problem):
"""Base Problem for multi timeseries datasets."""
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.RealEncoder(),
"targets": text_encoder.RealEncoder()
}
@property
def is_generate_per_split(self):
# generate_data will shard the data into TRAIN and EVAL for us.
return False
@property
def dataset_splits(self):
"""Splits of data to produce and number the output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": self.num_train_shards,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": self.num_eval_shards,
}, {
"split": problem.DatasetSplit.TEST,
"shards": self.num_test_shards,
}]
@property
def num_train_shards(self):
"""Number of training shards."""
return 9
@property
def num_eval_shards(self):
"""Number of eval shards."""
return 1
@property
def num_test_shards(self):
"""Number of test shards."""
return 1
@property
def num_series(self):
"""Number of timeseries."""
raise NotImplementedError()
@property
def num_input_timestamps(self):
"""Number of timestamps to include in the input."""
raise NotImplementedError()
@property
def num_target_timestamps(self):
"""Number of timestamps to include in the target."""
raise NotImplementedError()
def timeseries_dataset(self):
"""Multi-timeseries data [ timestamps , self.num_series ] ."""
raise NotImplementedError()
def eval_metrics(self):
eval_metrics = [metrics.Metrics.RMSE]
return eval_metrics
@property
def normalizing_constant(self):
"""Constant by which all data will be multiplied to be more normalized."""
return 1.0 # Adjust so that your loss is around 1 or 10 or 100, not 1e+9.
def preprocess_example(self, example, unused_mode, unused_hparams):
# Time series are flat on disk, we un-flatten them back here.
flat_inputs = example["inputs"]
flat_targets = example["targets"]
c = self.normalizing_constant
# Tensor2Tensor models expect [height, width, depth] examples, here we
# use height for time and set width to 1 and num_series is our depth.
example["inputs"] = tf.reshape(
flat_inputs, [self.num_input_timestamps, 1, self.num_series]) * c
example["targets"] = tf.reshape(
flat_targets, [self.num_target_timestamps, 1, self.num_series]) * c
return example
def generate_samples(self, data_dir, tmp_dir, dataset_split):
del data_dir
del tmp_dir
del dataset_split
series = self.timeseries_dataset()
num_timestamps = len(series)
# Generate samples with num_input_timestamps for "inputs" and
# num_target_timestamps in the "targets".
for split_index in range(self.num_input_timestamps,
num_timestamps - self.num_target_timestamps + 1):
inputs = series[split_index -
self.num_input_timestamps:split_index, :].tolist()
targets = series[split_index:split_index +
self.num_target_timestamps, :].tolist()
# We need to flatten the lists on disk for tf,Example to work.
flat_inputs = [item for sublist in inputs for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
example_keys = ["inputs", "targets"]
ex_dict = dict(zip(example_keys, [flat_inputs, flat_targets]))
yield ex_dict
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.input_modality = {"inputs": (registry.Modalities.REAL, self.num_series)}
p.target_modality = (registry.Modalities.REAL, self.num_series)
p.input_space_id = problem.SpaceID.REAL
p.target_space_id = problem.SpaceID.REAL
def generate_data(self, data_dir, tmp_dir, task_id=-1):
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
split_paths = [(split["split"], filepath_fns[split["split"]](
data_dir, split["shards"], shuffled=False))
for split in self.dataset_splits]
all_paths = []
for _, paths in split_paths:
all_paths.extend(paths)
if self.is_generate_per_split:
for split, paths in split_paths:
generator_utils.generate_files(
self.generate_samples(data_dir, tmp_dir, split), paths)
else:
generator_utils.generate_files(
self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN),
all_paths)
generator_utils.shuffle_dataset(all_paths)
def example_reading_spec(self):
data_fields = {
"inputs": tf.VarLenFeature(tf.float32),
"targets": tf.VarLenFeature(tf.float32),
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
@registry.register_problem
class TimeseriesToyProblem(TimeseriesProblem):
"""Timeseries problem with a toy dataset."""
@property
def num_train_shards(self):
"""Number of training shards."""
return 1
@property
def num_eval_shards(self):
"""Number of eval shards."""
return 1
@property
def num_test_shards(self):
"""Number of eval shards."""
return 0
@property
def num_series(self):
"""Number of timeseries."""
return 2
@property
def num_input_timestamps(self):
"""Number of timestamps to include in the input."""
return 2
@property
def num_target_timestamps(self):
"""Number of timestamps to include in the target."""
return 2
def timeseries_dataset(self):
series = [[float(i + n) for n in range(self.num_series)] for i in range(10)]
return | np.array(series) | numpy.array |
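# Windowing illustration for this toy problem: with 10 timestamps, 2 input and
# 2 target steps, generate_samples() slides split_index over range(2, 9) and
# yields seven (inputs, targets) pairs:
def toy_window_sketch():
    series = np.array([[float(i + n) for n in range(2)] for i in range(10)])
    windows = [(series[i - 2:i, :], series[i:i + 2, :]) for i in range(2, 9)]
    assert len(windows) == 7
    return windows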
#!/usr/bin/ python3
print('''\x1b[32m
██████╗ █████╗ ███╗ ███╗ █████╗ ███╗ ██╗███████╗████████╗
██╔══██╗██╔══██╗████╗ ████║██╔══██╗████╗ ██║██╔════╝╚══██╔══╝
██████╔╝███████║██╔████╔██║███████║██╔██╗ ██║█████╗ ██║
██╔══██╗██╔══██║██║╚██╔╝██║██╔══██║██║╚██╗██║██╔══╝ ██║
██║ ██║██║ ██║██║ ╚═╝ ██║██║ ██║██║ ╚████║███████╗ ██║
╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚══════╝ ╚═╝\x1b[35m
╔╦╗┌─┐ ┌┐┌┌─┐┬ ┬┌─┐ ╔═╗┬─┐┌─┐┌┬┐┌─┐┬┌┐┌ ╔╦╗┌─┐┌─┐┬┌─┐┌┐┌
║║├┤ ││││ │└┐┌┘│ │ ╠═╝├┬┘│ │ │ ├┤ ││││ ║║├┤ └─┐││ ┬│││
═╩╝└─┘ ┘└┘└─┘ └┘ └─┘ ╩ ┴└─└─┘ ┴ └─┘┴┘└┘ ═╩╝└─┘└─┘┴└─┘┘└┘
\u001b[31mAuthors: \x1b[33m<NAME> and <NAME>
\u001b[31mDate: \x1b[33m31-May-2017
\u001b[31mCorrespondence: \x1b[33m<EMAIL>
\u001b[31mURL: \x1b[33mhttps://sarisabban.github.io/RamaNet
\x1b[36m---------------------------------------------------------\x1b[0m''')
import os
import re
import sys
import h5py
import time
import glob
import math
import tqdm
import gzip
import keras
import random
import sklearn
import Bio.PDB
import datetime
import warnings
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from pyrosetta import *
from pyrosetta.toolbox import *
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras.losses import BinaryCrossentropy
from keras.layers.convolutional import Conv2D
from keras.layers import Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers import UpSampling2D, BatchNormalization
from keras.layers import Dropout, GlobalMaxPooling2D, Conv2DTranspose
# Silence Tensorflow, Keras, and initialise PyRosetta
def warn(*args, **kwargs): pass
warnings.warn = warn
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
init('-out:level 0')
print('\x1b[36m--------------------------------------------------------\x1b[0m')
# Setup arguments
parser = argparse.ArgumentParser(description='De Novo Protein Design Neural Network')
parser.add_argument('-d', '--dataset', nargs='+', metavar='', help='Build the Backbone or Sequence datasets')
parser.add_argument('-f', '--frag', action='store_true', help='Build the Fragment dataset')
parser.add_argument('-tb', '--TrainBack', action='store_true', help='Train the Backbone neural network')
parser.add_argument('-tf', '--TrainFrag', action='store_true', help='Train the Fragment neural network')
parser.add_argument('-ts', '--TrainSeq', action='store_true', help='Train the Sequence neural network')
args = parser.parse_args()
class Dataset():
''' Build a machine learning dataset of protein structures '''
def Database(self, TempDIR, FinalDIR):
'''
Downloads the entire PDB database from https://www.wwpdb.org/
moves all files into one directory, then uncompresses all the files
Generates a directory which contains all .PDB structure files
'''
print('\x1b[33m[.] Downloading PDB database...\x1b[0m')
web = 'rsync.wwpdb.org::ftp/data/structures/divided/pdb/'
os.system('rsync -rlpt -q -v -z --delete --port=33444 {} {}'
.format(web, TempDIR))
print('\x1b[32m[+] Download complete\x1b[0m')
os.mkdir(FinalDIR)
filelist = os.listdir(TempDIR)
print('\x1b[33m[.] Moving files...\x1b[0m')
for directories in tqdm.tqdm(filelist):
files = os.listdir('{}/{}'.format(TempDIR, directories))
for afile in files:
location = ('{}/{}/{}'.format(TempDIR, directories, afile))
os.rename(location, '{}/{}'.format(FinalDIR, afile))
os.system('rm -r ./{}'.format(TempDIR))
print('\x1b[32m[+] Moving complete\x1b[0m')
def Extract(self, directory):
'''
Extracts all the .ent.gz files and separate all chains and save them
into seperate .pdb files. Replaces each .ent.gz file with the .pdb
file of each chain
'''
print('\x1b[33m[.] Extracting files...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
io = Bio.PDB.PDBIO()
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
TheName = TheFile.split('.')[0].split('pdb')[1].upper()
InFile = gzip.open(TheFile, 'rt')
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure(TheName, InFile)
count = 0
for chain in structure.get_chains():
io.set_structure(chain)
io.save(structure.get_id()+'_'+chain.get_id()+'.pdb')
os.remove(TheFile)
except Exception as TheError:
print('\x1b[31m[-] Failed to extract\t{}\x1b[33m: {}\x1b[0m'
.format(TheFile.upper(), str(TheError)))
os.remove(TheFile)
os.chdir(current)
def NonProtein(self, directory):
''' Remove non-protein structures '''
print('\x1b[33m[.] Deleting non-protein structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=True)
if Type == []: os.remove(TheFile)
else: continue
except: os.remove(TheFile)
os.chdir(current)
def Size(self, directory, Size_From, Size_To):
''' Remove structures not within defined size '''
print('\x1b[33m[.] Removing unwanted structure sizes...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('X', TheFile)
model = structure[0]
dssp = Bio.PDB.DSSP(model, TheFile, acc_array='Wilke')
for aa in dssp: length = aa[0]
if length >= int(Size_To) or length <= int(Size_From):
os.remove(TheFile)
except: print('\x1b[31m[-] Error in finding protein size\x1b[0m')
os.chdir(current)
def Break(self, directory):
''' Remove structures with a broken (non-continuous) chains '''
print('\x1b[33m[.] Removing non-continuous structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
structure = Bio.PDB.PDBParser(QUIET=True)\
.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=True)
try:
x = Type[1]
os.remove(TheFile)
except: continue
os.chdir(current)
def Loops(self, directory, LoopLength):
'''
Remove structures that have loops that are larger than a
spesific length
'''
print('\x1b[33m[.] Removing structures with long loops...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
try:
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('X', TheFile)
model = structure[0]
dssp = Bio.PDB.DSSP(model, TheFile, acc_array='Wilke')
SS = list()
for res in dssp:
ss = res[2]
if ss == '-' or ss == 'T' or ss == 'S': SS.append('L')
else: SS.append('.')
loops = ''.join(SS).split('.')
loops = [item for item in loops if item]
LargeLoop = None
for item in loops:
if len(item) <= LoopLength: continue
else: LargeLoop = 'LargeLoop'
if LargeLoop == 'LargeLoop': os.remove(TheFile)
else: continue
except: os.remove(TheFile)
os.chdir(current)
def Renumber(self, directory):
''' Renumber structures starting at 1 '''
print('\x1b[33m[.] Renumbering structures...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
pdb = open(TheFile, 'r')
PDB = open(TheFile+'X', 'w')
count = 0
num = 0
AA2 = None
for line in pdb:
count += 1
AA1 = line[23:27]
if not AA1 == AA2: num += 1
final_line = line[:7]+'{:4d}'.format(count)+line[11:17]+\
line[17:21]+'A'+'{:4d}'.format(num)+line[26:]
AA2 = AA1
PDB.write(final_line)
PDB.close()
os.remove(TheFile)
os.rename(TheFile+'X', TheFile)
os.chdir(current)
def Rg(self, directory, RGcutoff):
''' Remove structures that are below the Raduis of Gyration's value '''
print('\x1b[33m[.] Removing structures with low Rg values...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
mass = list()
Structure = open(TheFile, 'r')
for line in Structure:
line = line.split()
if line[0] == 'TER' or line[0] == 'END': continue
else:
if line[-1] == 'C': mass.append(12.0107)
elif line[-1] == 'O': mass.append(15.9994)
elif line[-1] == 'N': mass.append(14.0067)
elif line[-1] == 'S': mass.append(32.0650)
elif line[-1] == 'H': mass.append(1.00794)
else: continue
coord = list()
p = Bio.PDB.PDBParser()
structure = p.get_structure('X', TheFile)
for model in structure:
for chain in model:
for residue in chain:
for atom in residue: coord.append(atom.get_coord())
xm = [(m*i, m*j, m*k) for (i, j, k), m in zip(coord, mass)]
tmass = sum(mass)
rr = sum(mi*i + mj*j + mk*k for (i, j, k), (mi, mj, mk)\
in zip(coord, xm))
mm = sum((sum(i)/tmass)**2 for i in zip(*xm))
rg = math.sqrt(rr/tmass-mm)
if rg <= RGcutoff: os.remove(TheFile)
else: continue
os.chdir(current)
def Clean(self, directory):
''' Clean each structure within a directory '''
print('\x1b[33m[.] Cleaning structures...\x1b[0m')
os.mkdir('PDBCleaned')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
CurFile = open(TheFile, 'r')
NewFile = open('Clean-{}'.format(TheFile), 'a')
for line in CurFile:
if line.split()[0] == 'ATOM': NewFile.write(line)
CurFile.close()
NewFile.close()
os.system('mv Clean-{} ../PDBCleaned'.format(TheFile))
os.chdir(current)
def Path(self, directory, path):
''' Generate a file with the path to each file '''
print('\x1b[33m[.] Generating paths...\x1b[0m')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
PathFile = open('PDB.list', 'a')
for TheFile in tqdm.tqdm(pdbfilelist):
line = '{}/PDBCleaned/{}\n'.format(path, TheFile)
PathFile.write(line)
os.system('mv PDB.list ../')
os.chdir(current)
def RelaxHPC(self, path, cores):
'''
Generate a PBS job scheduler to perform each structure
relax on a HPC
'''
HPCfile = open('relax.pbs', 'w')
HPCfile.write('#!/bin/bash\n')
HPCfile.write('#PBS -N Relax\n')
HPCfile.write('#PBS -q fat\n')
HPCfile.write('#PBS -l select=1:ncpus=1\n')
HPCfile.write('#PBS -j oe\n')
HPCfile.write('#PBS -J 1-{}\n'.format(str(cores)))
HPCfile.write('cd $PBS_O_WORKDIR\n')
HPCfile.write('mkdir PDBRelaxed\n')
HPCfile.write('cd PDBRelaxed\n')
HPCfile.write('''thefile=$(awk -v "line=${PBS_ARRAY_INDEX}"''')
HPCfile.write(''''NR == line { print; exit }' ../PDB.list)\n''')
HPCfile.write('{}/main/source/bin/'.format(path))
HPCfile.write('relax.default.linuxgccrelease')
HPCfile.write('-relax:thorough -nstruct 100 -database ')
HPCfile.write('{}/main/database -s $thefile'.format(path))
print('\x1b[32m[+] Generated HPC job submission file\x1b[0m')
def Relax(self, directory):
''' Relax each structure in a directory on a local computer '''
print('\x1b[33m[.] Relaxing structures...\x1b[0m')
os.mkdir('PDBRelaxed')
current = os.getcwd()
pdbfilelist = os.listdir(directory)
os.chdir(directory)
for TheFile in tqdm.tqdm(pdbfilelist):
for i in range(1, 101):
scorefxn = get_fa_scorefxn()
relax = pyrosetta.rosetta.protocols.relax.FastRelax()
relax.set_scorefxn(scorefxn)
pose = pose_from_pdb(TheFile)
relax.apply(pose)
pose.dump_pdb('Relaxed{}-{}'.format(i, TheFile))
os.system('mv Relaxed{}-{} ../PDBRelaxed'.format(i, TheFile))
os.chdir(current)
def C_Max(self, filename):
''' Find the maximum value of the Distance Map in a dataset '''
max_in_line = []
with open(filename, 'r') as f:
next(f)
for line in f:
line = line.strip().split(',')[1:]
line = [float(item) for item in line]
max_in_line.append(max(line))
maximum = max(max_in_line)
print('\x1b[32m[+] Contact Map maximum value: {}\x1b[0m'\
.format(maximum))
return(maximum)
def DatasetPSCM(self, directory):
'''
Compile a dataset of each residue's phi and psi angles and another
dataset of the contact map for each structure. This dataset is padded
with zeros.
'''
a = 'Compiling phi and psi angles dataset'
b = 'as well as a distance matrix dataset'
text = a + ' ' + b
print('\x1b[32m{}\x1b[0m'.format(text))
# Setup dataset header for angles
headerPS = ['PDB_ID']
for i in range(1, 150+1):
headerPS.append(',phi_{},psi_{}'.format(i, i))
headerPS = ''.join(headerPS)
with open('./PS.csv', 'w') as headPS:
headPS.write(headerPS+'\n')
# Setup dataset header for distance matrices
headerCM = ['PDB_ID']
for r in range(1, 150+1):
for c in range(1, 150+1):
headerCM.append(',{}{}'.format(r, c))
headerCM = ''.join(headerCM)
with open('./CM.csv', 'w') as headCM:
headCM.write(headerCM+'\n')
for File in tqdm.tqdm(os.listdir(directory)):
TheFile = '{}/{}'.format(directory, File)
try:
# Compile angles
pose = pose_from_pdb(TheFile)
phi = []
psi = []
for aa in range(len(pose.residues)):
try:
p = pose.phi(aa+1)
s = pose.psi(aa+1)
if p < 0: p = p+360
if s < 0: s = s+360
phi.append(p)
psi.append(s)
except: pass
angles = []
for P, S in zip(phi, psi):
angles.append(str(round(P, 5))+','+str(round(S, 5)))
assert len(phi) == len(psi)
Angles = ','.join(angles)
if len(angles) >= 150: AngLine = Angles
else:
addition = 150-len(angles)
zeros = []
for adds in range(addition): zeros.append('0.0,0.0')
Zeros = ','.join(zeros)
AngLine = '{},{}'.format(Angles, Zeros)
ThePSLine = '{},{}\n'.format(File, AngLine)
with open('PS.csv', 'a') as PSdata:
PSdata.write(ThePSLine)
#Compile contact map (Ca-Ca contact <= 12 angstroms)
BIO = Bio.PDB.PDBParser(QUIET=True)
structure = BIO.get_structure('X', TheFile)
ppb = Bio.PDB.Polypeptide.PPBuilder()
Type = ppb.build_peptides(structure, aa_only=False)
model = Type
chain = model[0]
CM = []
for aa1 in range(0, 150):
for aa2 in range(0, 150):
try:
residue1 = chain[aa1]
residue2 = chain[aa2]
atom1 = residue1['CA']
atom2 = residue2['CA']
if atom1-atom2 <= 12: CM.append(str(atom1-atom2))
else: CM.append(str(0))
except:
CM.append(str(0))
assert len(CM) == 22500
ContactMap = ','.join(CM)
TheCMLine = '{},{}\n'.format(File, ContactMap)
with open('CM.csv', 'a') as CMdata:
CMdata.write(TheCMLine)
except: pass
def VectorisePSCM(self, PS_file='PS.csv',
CM_file='CM.csv',
C_MAX=12,
fp=np.float64):
'''
This function vectorises the backbone PS and CM datasets, normalises
them, combines them, as well as constructs the final tensor and
export the result as a serial.
'''
# 1. Import a single row of PS dataset
with open(PS_file) as PSf:
next(PSf)
P, S = [], []
for line in PSf:
# 2. Isolate different angles
line = line.strip().split(',')
p = [float(item) for item in line[1::2]]
s = [float(item) for item in line[2::2]]
assert len(p) == len(s)
P.append(np.array(p, dtype=fp))
S.append(np.array(s, dtype=fp))
with open(CM_file) as CMf:
next(CMf)
CM = []
for line in CMf:
# 3. Isolate different points
line = [float(item) for item in line.strip().split(',')[1:]]
cm = np.reshape(line, (150, 150))
CM.append(np.array(cm, dtype=fp))
# 4. Construct PS matrices
P = np.array(P)
S = np.array(S)
# 5. Normalise PS angles (min/max) [-1, 1]
P /= 180
S /= 180
P -= 1
S -= 1
PS = np.array([P, S])
PS = np.swapaxes(PS, 0, 2)
PS = np.swapaxes(PS, 0, 1)
# 6. Construct CM matrices
CM = np.array(CM)
# 7. Normalise CM contact map (min/max) [-1, 1]
CM /= (C_MAX/2)
CM -= 1
# 8. Construct final dataset matrix
dataset = np.concatenate([PS, CM], axis=2)
# 9. Suffle dataset
sklearn.utils.shuffle(dataset)
# 10. Serialise tensors
with h5py.File('PS+CM.h5', 'w') as data:
dataset = data.create_dataset('default', data=dataset)
def DatasetAsPSaM(self, directory):
'''
Compile a dataset of each residue's amino acid identify, secondary
structure, phi angle, psi angle, solvent accessible surface area as
a .csv file and the contact map as a separate .csv file. to be run
after clean() on the ./cleaned directory, also outputs a file
identifying the sizes of structures, so the largest value can be used
with HeaderAsPSaM()
'''
os.makedirs('./Completed', exist_ok=True)
os.makedirs('./Error_NotEqual', exist_ok=True)
os.makedirs('./Error_Broken', exist_ok=True)
os.makedirs('./Error_Small', exist_ok=True)
for File in tqdm.tqdm(os.listdir(directory)):
try:
TheFile = '{}/{}'.format(directory, File)
pose = pose_from_pdb(TheFile)
DSSP = pyrosetta.rosetta.protocols.moves.DsspMover()
DSSP.apply(pose)
sasa_calc = pyrosetta.rosetta.core.scoring.sasa.SasaCalc()
sasa_calc.calculate(pose)
size = pose.total_residue()
aa = []
ss = []
phi = []
psi = []
sasa = []
info = []
ctmp = []
m = []
surf = list(sasa_calc.get_residue_sasa())
for r in range(size):
if pose.residue(r+1).is_protein():
aa.append(pose.sequence(r+1, r+1))
ss.append(pose.secstruct(r+1))
p = pose.phi(r+1)
if p < 0: p = p+360
phi.append(p)
s = pose.psi(r+1)
if s < 0: s = s+360
psi.append(s)
sasa.append(surf[r])
for r in range(0, size):
for R in range(0, size):
if pose.residue(r+1).is_protein() and\
pose.residue(R+1).is_protein():
CAr = pose.residue(r+1).xyz('CA')
CAR = pose.residue(R+1).xyz('CA')
CAr_CAR_vector = CAR-CAr
Cont = CAr_CAR_vector.norm()
if Cont <= 12: ctmp.append(Cont)
else: ctmp.append(0)
if len(aa) >= 50:
try:
assert len(aa) == len(ss) == len(phi)\
== len(psi) == len(sasa) == math.sqrt(len(ctmp))
for AA,SS,P,S,SASA in zip(aa,ss,phi,psi,sasa):
info.append('{},{},{},{},{}'\
.format(AA, SS, P, S, SASA))
Info = ','.join(info)
with open('./AsPSa_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + Info + '\n')
with open('lengths.txt', 'a') as length:
length.write(str(len(aa))+',')
for x in ctmp:
m.append('{}'.format(x))
M = ','.join(m)
with open('./M_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + M + '\n')
os.system('mv {} ./Completed'.format(TheFile))
except:
os.system('mv {} ./Error_NotEqual'\
.format(TheFile))
else: os.system('mv {} ./Error_Small'.format(TheFile))
except: os.system('mv {} ./Error_Broken'.format(TheFile))
def HeaderAsPSaM(self, choice='AsPSa'):
'''
Constructs a .csv header and completes the dataset. To find the value of
the largest structure run: sort -nk 1 lengths.txt
'''
with open('lengths.txt', 'r') as L:
length = int(max(L.readlines()[0].strip().split(',')))
header = ['PDB_ID']
if choice == 'AsPSa':
for i in range(1, length+1):
header.append(',aa_{},ss_{},phi_{},psi_{},sasa_{}'\
.format(i, i, i, i, i))
header = ''.join(header)
with open('./AsPSa_noheader_nofill.csv', 'r') as data:
with open('./AsPSa_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./AsPSa_noheader_nofill.csv')
elif choice == 'M':
for r in range(1, length+1):
for c in range(1, length+1):
header.append(',{}{}'.format(r, c))
header = ''.join(header)
with open('./M_noheader_nofill.csv', 'r') as data:
with open('./M_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./M_noheader_nofill.csv')
def Fill(self, filename):
''' Fills missing .csv table spaces with zeros '''
name = filename.split('_')[0]
with open(filename) as f:
with open(name+'.csv', 'a') as F:
first_line = f.readline()
F.write(first_line)
size = len(first_line.strip().split(','))
for line in f:
line = line.strip().split(',')
gap = size - len(line)
for zero in range(gap):
line.append('0')
new_line = ','.join(line)
F.write(new_line + '\n')
os.remove(filename)
def VectoriseAsPSaM(self, filenameA='AsPSa.csv', filenameM='M.csv'):
'''
Placeholder: intended to vectorise, normalise and serialise the AsPSa and M
datasets (cf. VectorisePSCM above); not yet implemented.
'''
pass
def build(self, switches='', directory='PDBDatabase'):
if len(switches) == 20:
switch = list(switches)
if switch[0] == '1': self.Database('DATABASE', directory)
if switch[1] == '1': self.Extract(directory)
if switch[2] == '1': self.NonProtein(directory)
if switch[3] == '1': self.Size(directory, 80, 150)
if switch[4] == '1': self.Break(directory)
if switch[5] == '1': self.Loops(directory, 10)
if switch[6] == '1': self.Renumber(directory)
if switch[7] == '1': self.Rg(directory, 15)
########## --- HUMAN EYE FILTERING --- ##########
if switch[8] == '1': self.Clean(directory)
if switch[9] == '1': self.Path('PDBCleaned', '{PATH}')
if switch[10] == '1': self.RelaxHPC('~/Rosetta', 829)
if switch[11] == '1': self.Relax('PDBCleaned')
if switch[12] == '1': self.DatasetAsPSaM('PDBCleaned')
if switch[13] == '1': self.HeaderAsPSaM('AsPSa')
if switch[14] == '1':
self.HeaderAsPSaM('M')
os.remove('lengths.txt')
if switch[15] == '1':
self.Fill('AsPSa_nofill.csv')
self.Fill('M_nofill.csv')
if switch[16] == '1': self.DatasetPSCM('PDBCleaned')
if switch[17] == '1': self.C_Max('dataset_CM.csv')
if switch[18] == '1': self.VectorisePSCM()
if switch[19] == '1': self.VectoriseAsPSaM()
else: print('\x1b[31m[-] Error\x1b[33m: Wrong string length\x1b[0m')
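# Usage sketch (hypothetical switch string): each of the 20 characters toggles
# one pipeline stage above, e.g. running only Clean (index 8), Path (9) and
# DatasetPSCM (16):
#   Dataset().build(switches='00000000110000001000', directory='PDBDatabase')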
def Vall(filename='vall.jul19.2011', m=16800, nx=1490):
'''
Compile the PDB IDs, chains, phi, psi, omega, and SASA of all the structures
from the Rosetta vall.jul19.2011 database into a .csv file
'''
assert os.path.isfile('./{}'.format(filename)),\
'Make sure the vall.jul19.2011 file is in the same directory as this script'
with open(filename, 'r') as f:
with open('Fragments.csv', 'w') as F:
header = ['PDBID,Chain']
for i in range(1, nx+1):
header.append(',AA_{},SS_{},P_{},S_{},O_{},SASA_{}'\
.format(i, i, i, i, i, i))
header = ''.join(header)
F.write(header + '\n')
for i in range(30): next(f)
ID = []
CH = []
AA = []
SS = []
P = []
S = []
O = []
SASA= []
ID_seen = set()
for line in f:
line = line.strip().split()
if line[0] not in ID_seen:
exp = []
for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
exp.append('{},{},{},{},{},{}'\
.format(aa, ss, p, s, o, sasa))
exp = ','.join(exp)
if exp == '': pass
else: F.write(ID + ',' + CH + ',' + exp + '\n')
ID = None
CH = None
AA = []
SS = []
P = []
S = []
O = []
SASA = []
ID_seen.add(line[0])
ID = line[0][:4].upper()
CH = line[0][-1].upper()
AA.append(line[1])
SS.append(line[2])
P.append(line[14])
S.append(line[15])
O.append(line[16])
SASA.append(line[19])
else:
ID = line[0][:4].upper()
CH = line[0][-1].upper()
AA.append(line[1])
SS.append(line[2])
P.append(line[14])
S.append(line[15])
O.append(line[16])
SASA.append(line[19])
exp = []
for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
exp.append('{},{},{},{},{},{}'\
.format(aa, ss, p, s, o, sasa))
exp = ','.join(exp)
F.write(ID + ',' + CH + ',' + exp)
def Frag_vectorise(filename='Fragments.csv', nx=1452):
''' Vectorises the fragments dataset, normalises it, then serialises it '''
# 1. Import data
rows = len(open(filename).readlines()) - 1
# 2. Generate a list of random number of rows
lines = list(range(1, rows + 1))
random.shuffle(lines)
# 3. Open CSV file
with open(filename, 'r') as File: all_lines_variable = File.readlines()
PDBID, CHAIN, X, Y = [], [], [], []
for i in tqdm.tqdm(lines):
# 4. Import data line by line
line = all_lines_variable[i]
line = line.strip().split(',')
if line[0] == '1OFD': continue # Causes an error
aa = np.array(line[2::6])
ss = np.array(line[3::6])
p = np.array(line[4::6])
s = np.array(line[5::6])
o = np.array(line[6::6])
sasa = np.array(line[7::6])
p = np.array([float(i) for i in p])
s = np.array([float(i) for i in s])
o = np.array([float(i) for i in o])
sasa = np.array([float(i) for i in sasa])
# 5. Re-format data
aa[aa=='A'] = 0
aa[aa=='C'] = 1
aa[aa=='D'] = 2
aa[aa=='E'] = 3
aa[aa=='F'] = 4
aa[aa=='G'] = 5
aa[aa=='H'] = 6
aa[aa=='I'] = 7
aa[aa=='K'] = 8
aa[aa=='L'] = 9
aa[aa=='M'] = 10
aa[aa=='N'] = 11
aa[aa=='P'] = 12
aa[aa=='Q'] = 13
aa[aa=='R'] = 14
aa[aa=='S'] = 15
aa[aa=='T'] = 16
aa[aa=='V'] = 17
aa[aa=='W'] = 18
aa[aa=='Y'] = 19
ss[ss=='L'] = 0
ss[ss=='H'] = 1
ss[ss=='E'] = 2
p[p<0] = p[p<0] + 360
s[s<0] = s[s<0] + 360
o[o<0] = o[o<0] + 360
aa = aa.astype(int)
ss = ss.astype(int)
# 6. Padding categories
gap = nx - aa.size
for pad in range(gap):
aa = np.append(aa, -1)
ss = np.append(ss, -1)
# 7. One-hot encode amino acid sequences and secondary structures
Aminos = []
for x in aa:
letter = [0 for _ in range(20)]
if x != -1: letter[x] = 1
Aminos.append(letter)
Struct = []
for x in ss:
letter = [0 for _ in range(3)]
if x != -1: letter[x] = 1
Struct.append(letter)
aa = np.array(Aminos)
ss = np.array(Struct)
# 8. Normalise data [min/max]
p = (p-0)/(360-0)
s = (s-0)/(360-0)
o = (o-0)/(360-0)
sasa = (sasa-0)/(277-0)
# 9. Padding values
for pad in range(gap):
p = np.append(p, 0)
s = np.append(s, 0)
o = np.append(o, 0)
sasa = np.append(sasa, 0)
# 10. Expand axis
p = np.expand_dims(p, axis=1)
s = np.expand_dims(s, axis=1)
o = np.expand_dims(o, axis=1)
sasa = np.expand_dims(sasa, axis=1)
# 11. Export
featur = np.concatenate((aa, ss), axis=1)
angles = np.concatenate((p, s, o), axis=1)
PDBID.append(line[0])
CHAIN.append(line[1])
X.append(featur)
Y.append(angles)
PDBID = np.array(PDBID)
CHAIN = np.array(CHAIN)
PDBID = np.expand_dims(PDBID, axis=1)
CHAIN = np.expand_dims(CHAIN, axis=1)
X = np.array(X)
Y = np.array(Y)
print('X =', X.shape)
print('Y =', Y.shape)
# 12. Serialise tensors
with h5py.File('Frag_Y.h5', 'w') as y:
dset = y.create_dataset('default', data=Y)
with h5py.File('Frag_X.h5', 'w') as x:
dset = x.create_dataset('default', data=X)
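# Reload sketch for the serialised fragment tensors written above:
def load_fragment_tensors_sketch():
    with h5py.File('Frag_X.h5', 'r') as x:
        X = x['default'][()]
    with h5py.File('Frag_Y.h5', 'r') as y:
        Y = y['default'][()]
    return X, Y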
def SQM(filename):
'''
Structure Quality Metric:
Calculates the ratio of helices and sheets to loops, the percent of amino
acids comprising the structure core, and the radius of gyration as values
between 0.0-1.0, it then averages the three values. Returns a value between
0.0-1.0 where good structure >= 0.6
'''
parser = Bio.PDB.PDBParser()
structure = parser.get_structure('{}'.format(filename), filename)
dssp = Bio.PDB.DSSP(structure[0], filename, acc_array='Wilke')
AminoAcid = { 'A':129, 'P':159, 'N':195, 'H':224,
'V':174, 'Y':263, 'C':167, 'K':236,
'I':197, 'F':240, 'Q':225, 'S':155,
'L':201, 'W':285, 'E':223, 'T':172,
'M':224, 'R':274, 'G':104, 'D':193}
sec_struct = []
SASA = []
for aa in dssp:
if aa[2] == 'G' or aa[2] == 'H' or aa[2] == 'I': ss = 'H'
elif aa[2] == 'B' or aa[2] == 'E': ss = 'S'
elif aa[2] == 'S' or aa[2] == 'T' or aa[2] == '-': ss = 'L'
sec_struct.append(ss)
sasa = AminoAcid[aa[1]]*aa[3]
if sasa <= 25: sasa = 'C'
elif 25 < sasa < 40:sasa = 'B'
elif sasa >= 40: sasa = 'S'
SASA.append(sasa)
''' Secondary structure measurement '''
H = len([x for x in sec_struct if x == 'H'])
S = len([x for x in sec_struct if x == 'S'])
L = len([x for x in sec_struct if x == 'L'])
total = len(sec_struct)
ratio = (H+S)/total
limit = 1
slope = 10
bias = 0.5
SS = limit/(1+np.exp(slope*(bias-ratio)))
''' SASA measurement '''
surface = len([x for x in SASA if x == 'S'])
boundery = len([x for x in SASA if x == 'B'])
in_core = len([x for x in SASA if x == 'C'])
total = len(SASA)
percent = (in_core*100)/total
Core = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((percent-30)**2)/100)
''' Radius of gyration measurement '''
coord = list()
mass = list()
Structure = open(filename, 'r')
for line in Structure:
try:
line = line.split()
x = float(line[6])
y = float(line[7])
z = float(line[8])
coord.append([x, y, z])
if line[-1] == 'C': mass.append(12.0107)
elif line[-1] == 'O': mass.append(15.9994)
elif line[-1] == 'N': mass.append(14.0067)
elif line[-1] == 'S': mass.append(32.065)
except: pass
xm = [(m*i, m*j, m*k) for (i, j, k), m in zip(coord, mass)]
tmass = sum(mass)
rr = sum(mi*i + mj*j + mk*k for (i, j, k), (mi, mj, mk) in zip(coord, xm))
mm = sum((sum(i)/tmass)**2 for i in zip(*xm))
rg = math.sqrt(rr/tmass-mm)
Rg = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((rg-12)**2)/40)
''' The metric '''
TheMetric = sum([SS, Core, Rg])/3
return(round(TheMetric, 5))
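# Worked example of the three SQM terms with assumed inputs; the prefactor
# 2.50662/sqrt(2*pi) is ~1.0, so each term peaks near 1.0:
def sqm_terms_sketch(ratio=0.7, percent=30.0, rg=14.0):
    SS = 1 / (1 + np.exp(10 * (0.5 - ratio)))                                           # ~0.8808
    Core = (2.50662 / math.sqrt(2 * math.pi)) * math.exp(-((percent - 30) ** 2) / 100)  # ~1.0
    Rg = (2.50662 / math.sqrt(2 * math.pi)) * math.exp(-((rg - 12) ** 2) / 40)          # ~0.9048
    return round(sum([SS, Core, Rg]) / 3, 5)                                            # ~0.92854, i.e. "good" (>= 0.6)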
class fold():
''' Folds a protein structure given the phi/psi angles and contact map '''
def __init__(self, Pa, Sa, CM):
CM = np.reshape(CM, (150, 150))
self.size = len([i for i in np.diag(CM, k=1) if i!=0])
self.U = | np.triu(CM, k=0) | numpy.triu |
"""Conformal regressors and predictive systems (crepes)
Routines that implement conformal regressors and conformal predictive
systems, which transform point predictions into prediction intervals
and cumulative distributions, respectively.
Author: <NAME> (<EMAIL>)
Copyright 2021 <NAME>
License: BSD 3 clause
"""
# To do:
#
# - error messages
# - commenting and documentation
# - test for uniformity of p-values (in evaluate)
import numpy as np
import pandas as pd
import time
class ConformalPredictor():
def __init__(self):
self.alphas = None
self.fitted = False
self.normalized = None
self.mondrian = None
self.time_fit = None
self.time_predict = None
self.time_evaluate = None
class ConformalRegressor(ConformalPredictor):
"""
Conformal Regressor.
A conformal regressor transforms point predictions (regression values) into
prediction intervals, for a certain confidence level.
"""
def __repr__(self):
if self.fitted:
return "ConformalRegressor(fitted={}, normalized={}, mondrian={})".format(self.fitted, self.normalized, self.mondrian)
else:
return "ConformalRegressor(fitted={})".format(self.fitted)
def fit(self, residuals=None, sigmas=None, bins=None):
"""
Fit conformal regressor.
Parameters
----------
residuals : array-like of shape (n_values,)
Residuals; actual - predicted
sigmas: array-like of shape (n_values,)
Sigmas; difficulty estimates
bins : array-like of shape (n_values,)
Bins; Mondrian categories
Returns
-------
self : object
Fitted ConformalRegressor.
"""
tic = time.time()
abs_residuals = np.abs(residuals)
if bins is None:
self.mondrian = False
if sigmas is None:
self.normalized = False
self.alphas = np.sort(abs_residuals)[::-1]
else:
self.normalized = True
self.alphas = np.sort(abs_residuals/sigmas)[::-1]
else:
self.mondrian = True
bin_values = np.unique(bins)
if sigmas is None:
self.normalized = False
self.alphas = (bin_values,[np.sort(abs_residuals[bins==b])[::-1] for b in bin_values])
else:
self.normalized = True
self.alphas = (bin_values, [np.sort(abs_residuals[bins==b]/sigmas[bins==b])[::-1] for b in bin_values])
self.fitted = True
toc = time.time()
self.time_fit = toc-tic
return self
def predict(self, y_hat=None, sigmas=None, bins=None, confidence=0.95, y_min=-np.inf, y_max=np.inf):
"""
Predict using the conformal regressor.
Parameters
----------
y_hat : array-like of shape (n_values,)
predicted (regression) values
sigmas : array-like of shape (n_values,)
Sigmas; difficulty estimates
bins : array-like of shape (n_values,)
Bins; Mondrian categories
confidence : float in range (0,1), default = 0.95
The confidence level.
y_min : float or int, default = -np.inf
The minimum value to include in prediction intervals.
y_max : float or int, default = np.inf
The maximum value to include in prediction intervals.
Returns
-------
intervals : ndarray of shape (n_values, 2)
Prediction intervals.
"""
tic = time.time()
intervals = np.zeros((len(y_hat),2))
if not self.mondrian:
alpha_index = int((1-confidence)*(len(self.alphas)+1))-1
if alpha_index >= 0:
alpha = self.alphas[alpha_index]
if self.normalized:
intervals[:,0] = y_hat-alpha*sigmas
intervals[:,1] = y_hat+alpha*sigmas
else:
intervals[:,0] = y_hat-alpha
intervals[:,1] = y_hat+alpha
else:
intervals[:,0] = -np.inf # If the no. of calibration instances is too small for the chosen confidence level,
intervals[:,1] = np.inf # then the intervals will be of maximum size
else:
bin_values, bin_alphas = self.alphas
bin_indexes = [np.argwhere(bins == b).T[0] for b in bin_values]
alpha_indexes = [int((1-confidence)*(len(bin_alphas[b])+1))-1 for b in range(len(bin_values))]
bin_alpha = [bin_alphas[b][alpha_indexes[b]] if alpha_indexes[b]>=0 else np.inf for b in range(len(bin_values))]
if self.normalized:
for b in range(len(bin_values)):
intervals[bin_indexes[b],0] = y_hat[bin_indexes[b]]-bin_alpha[b]*sigmas[bin_indexes[b]]
intervals[bin_indexes[b],1] = y_hat[bin_indexes[b]]+bin_alpha[b]*sigmas[bin_indexes[b]]
else:
for b in range(len(bin_values)):
intervals[bin_indexes[b],0] = y_hat[bin_indexes[b]]-bin_alpha[b]
intervals[bin_indexes[b],1] = y_hat[bin_indexes[b]]+bin_alpha[b]
if y_min > -np.inf:
intervals[intervals<y_min] = y_min
if y_max < np.inf:
intervals[intervals>y_max] = y_max
toc = time.time()
self.time_predict = toc-tic
return intervals
def evaluate(self, y_hat=None, y=None, sigmas=None, bins=None, confidence=0.95, y_min=-np.inf, y_max=np.inf, metrics=None):
"""
Evaluate the conformal regressor.
Parameters
----------
y_hat : array-like of shape (n_values,)
predicted (regression) values
sigmas : array-like of shape (n_values,)
Sigmas; difficulty estimates
bins : array-like of shape (n_values,)
Bins; Mondrian categories
confidence : float in range (0,1), default = 0.95
The confidence level.
y_min : float or int, default = -np.inf
The minimum value to include in prediction intervals.
y_max : float or int, default = np.inf
The maximum value to include in prediction intervals.
metrics : a string or a list of strings, default = list of all metrics
Evaluation metrics: "error","efficiency", "time_fit","time_evaluate"
Returns
-------
results : dictionary with a key for each selected metric
Estimated performance using the metrics.
"""
tic = time.time()
if metrics is None:
metrics = ["error","efficiency","time_fit","time_evaluate"]
test_results = {}
intervals = self.predict(y_hat, sigmas, bins, confidence, y_min, y_max)
if "error" in metrics:
test_results["error"] = 1-np.mean(np.logical_and(intervals[:,0]<=y,y<=intervals[:,1]))
if "efficiency" in metrics:
test_results["efficiency"] = np.mean(intervals[:,1]-intervals[:,0])
if "time_fit" in metrics:
test_results["time_fit"] = self.time_fit
toc = time.time()
self.time_evaluate = toc-tic
if "time_evaluate" in metrics:
test_results["time_evaluate"] = self.time_evaluate
return test_results
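# Usage sketch with synthetic calibration residuals; empirical coverage of the
# returned intervals should approach the chosen confidence level:
def conformal_regressor_sketch():
    rng = np.random.default_rng(0)
    residuals_cal = rng.normal(0, 1, 500)  # y - y_hat on a calibration set
    cr = ConformalRegressor().fit(residuals=residuals_cal)
    return cr.predict(y_hat=np.array([10.0, 12.5]), confidence=0.95)  # rows: [lower, upper]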
class ConformalPredictiveSystem(ConformalPredictor):
"""
Conformal Predictive System.
A conformal predictive system transforms point predictions (regression values) into
cumulative distributions (conformal predictive distributions).
"""
def __repr__(self):
if self.fitted:
return "ConformalPredictiveSystem(fitted={}, normalized={}, mondrian={})".format(self.fitted, self.normalized, self.mondrian)
else:
return "ConformalPredictiveSystem(fitted={})".format(self.fitted)
def fit(self, residuals=None, sigmas=None, bins=None):
"""
Fit conformal predictive system.
Parameters
----------
residuals : array-like of shape (n_values,)
Residuals; actual - predicted
sigmas: array-like of shape (n_values,)
Sigmas; difficulty estimates
bins : array-like of shape (n_values,)
Bins; Mondrian categories
Returns
-------
self : object
Fitted ConformalPredictiveSystem.
"""
tic = time.time()
if bins is None:
self.mondrian = False
if sigmas is None:
self.normalized = False
self.alphas = np.sort(residuals)
else:
self.normalized = True
self.alphas = np.sort(residuals/sigmas)
else:
self.mondrian = True
bin_values = np.unique(bins)
if sigmas is None:
self.normalized = False
self.alphas = (bin_values,[ | np.sort(residuals[bins==b]) | numpy.sort |
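# A fitted ConformalPredictiveSystem turns each point prediction into a full
# cumulative distribution built from y_hat plus the sorted calibration
# residuals (scaled by sigmas when normalized); percentiles of that
# distribution then yield prediction intervals. Sketch, assuming a fitted
# instance `cps` and that predict() accepts percentile arguments:
#   cps.predict(y_hat=y_hat_test, lower_percentiles=2.5, higher_percentiles=97.5)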
import numpy as np
import math
def input_matrix(size):
"""enter matrix
Args:
size (int): size for matrix
Returns:
np.array: matrix with args
"""
matrix = np.array([[float(j) for j in input("Строка матрицы: ").split()]
for i in range(size)])
return matrix
def input_vector():
"""enter vector
Returns:
list: vector with args
"""
return list(map(int, input('Элементы вектора: ').split()))
def get_inverted(matrix_b, vector_x, position):
"""_summary_
Args:
matrix_b (np.array): source matrix
vector_x (np.array): plan
position (np.array): index
Returns:
np.array: inverted matrix_
"""
vector_l = matrix_b.dot(vector_x.T)
if vector_l[position] == 0:
return None
vector_l_cover = vector_l[position]
vector_l[position] = -1
vector_l *= -1 / vector_l_cover
matrix_b_new = np.eye(len(matrix_b), dtype=float)
matrix_b_new[:, position] = vector_l
return matrix_b_new.dot(matrix_b)
def main_stage_simplex_method(m, n, matrix_a, vector_b, vector_c, vector_x,
vector_jb):
"""main stage simplex
Args:
m (int): count of rows
n (int): count of colums
matrix_a (np.array): matrix with values
vector_b (list): vector b
vector_c (list): vector c
vector_x (list): vector x
vector_jb (list): vector jb
Returns:
np.array or None: the optimal plan, or None if the objective is unbounded
"""
if m == n:
return vector_x
matrix_ab = matrix_a[:, vector_jb]
matrix_b = np.linalg.inv(matrix_ab)
while True:
vector_jb_n = [i for i in range(n) if i not in vector_jb]
delta = vector_c[vector_jb].dot(matrix_b).dot(
matrix_a[:, vector_jb_n]) - vector_c[vector_jb_n]
checker = -1
for i, el in enumerate(delta):
if el < 0:
checker = i
break
if checker == -1:
return vector_x
j0 = vector_jb_n[checker]
vector_z = matrix_b.dot(matrix_a[:, j0])
if all([i <= 0 for i in vector_z]):
return None
theta = [vector_x[vector_jb[i]] / vector_z[i]
if vector_z[i] > 0 else math.inf for i in range(m)]
theta_0 = min(theta)
s = theta.index(theta_0)
vector_jb[s] = j0
matrix_b = get_inverted(matrix_b, matrix_a[:, j0], s)
if matrix_b is None:
return None
vector_x_new = np.zeros(n, dtype=float)
vector_x_new[vector_jb] = vector_x[vector_jb] - theta_0 * vector_z
vector_x_new[j0] = theta_0
vector_x = vector_x_new
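# Tiny worked example (assumed values): maximise x1 subject to
# x1 + x2 + x3 = 4, x >= 0, starting from the basic feasible plan
# x = (0, 0, 4) with basis jb = [2]; one pivot yields (4, 0, 0):
def simplex_sketch():
    matrix_a = np.array([[1.0, 1.0, 1.0]])
    vector_c = np.array([1.0, 0.0, 0.0])
    x0 = np.array([0.0, 0.0, 4.0])
    return main_stage_simplex_method(1, 3, matrix_a, [4.0], vector_c, x0, [2])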
def first_step_simplex_method(matrix_a, vector_b, m, n):
for i in range(m):
if vector_b[i] < 0:
vector_b[i] *= -1
matrix_a[i] *= -1
vector_jb = [i for i in range(n, n + m)]
zeros = [0. for i in range(n)]
ones = [1. for i in range(m)]
matrix = np.concatenate((matrix_a, | np.eye(m) | numpy.eye |
import numpy as np
from nsgt.fft import rfftp, irfftp, fftp, ifftp
import unittest
class TestFFT(unittest.TestCase):
def __init__(self, methodName, n=10000):
super(TestFFT, self).__init__(methodName)
self.n = n
def test_rfft(self):
seq = np.random.random(self.n)
ft = rfftp()
a = ft(seq)
b = np.fft.rfft(seq)
self.assertTrue(np.allclose(a, b))
def test_irfft(self):
seq = np.random.random(self.n)+np.random.random(self.n)*1.j
outn = (self.n-1)*2 + np.random.randint(0,2) # even or odd output size
ft = irfftp()
a = ft(seq, outn=outn)
b = np.fft.irfft(seq, n=outn)
self.assertTrue(np.allclose(a, b))
def test_fft(self):
seq = np.random.random(self.n)
ft = fftp()
a = ft(seq)
b = np.fft.fft(seq)
self.assertTrue( | np.allclose(a, b) | numpy.allclose |
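# Usage sketch: the parametrised constructor lets the suite run at a chosen
# transform length, e.g.:
#   suite = unittest.TestSuite(TestFFT(m, n=4096)
#                              for m in ('test_rfft', 'test_irfft', 'test_fft'))
#   unittest.TextTestRunner().run(suite)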
import numpy as np
from gensim import corpora, models
from helpers.common import preprocess
DOCUMENT_NUM_TOPICS = 2
class LDAModel:
def __init__(self):
self.topics_subtopics = None
self.model = None
self.dictionary = None
def get_topics_and_subtopics(
self, documents, num_topics, num_words,
depth=3, pre_processing_func=preprocess, passes=10):
if self.topics_subtopics is not None:
return self.topics_subtopics
self.documents = documents
texts = [
pre_processing_func(document).split() for document in documents
]
self.dictionary = corpora.Dictionary(texts)
self.corpus = [self.dictionary.doc2bow(text) for text in texts]
self.model, self.topics_subtopics = _get_subtopics(
self.corpus, self.dictionary, num_topics, num_words, depth, passes
)
return self.topics_subtopics
def get_topics_correlation(self, documents, num_topics, num_words):
# First calculate topics and subtopics
self.get_topics_and_subtopics(
documents, num_topics, num_words, depth=1
)
topics = self.model.get_document_topics(self.corpus)
document_topics = [dict(x) for x in topics]
'''document_topics is now of the form
[ {0: <val>, 1:<val>...}, ...]
where each dict gives the composition of topic in the document
We want vector of each topic'''
topic_names = []
for x in range(num_topics):
data = self.topics_subtopics['Topic {}'.format(x)]
topic_names.append(
data['keywords'][0][0] # get the first keyword
)
# initialize topic vectors with zero
vectors = [
[0.0 for _ in range(len(documents))]
for _ in range(num_topics)
]
for doc_ind, composition in enumerate(document_topics):
for topic_id, value in composition.items():
vectors[topic_id][doc_ind] = value
vectors = [np.asarray(x) for x in vectors]
# normalize vectors
normalized_vectors = [x/ | np.linalg.norm(x) | numpy.linalg.norm |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import os
def count_lines(fname):
"""count lines in a file
:param fname: filename including path
"""
if not os.path.exists(fname):
return 0
with open(fname) as f:
rows = sum(1 for _ in f)
return int(rows)
class JumpProposal(object):
def __init__(self, pta):
"""Set up some custom jump proposals
:param pta: an `enterprise` PTA instance
"""
self.params = pta.params
self.pnames = pta.param_names
self.npar = len(pta.params)
self.ndim = sum(p.size or 1 for p in pta.params)
# parameter map
self.pmap = {}
ct = 0
for p in pta.params:
size = p.size or 1
self.pmap[p] = slice(ct, ct+size)
ct += size
# parameter indices map
self.pimap = {}
for ct, p in enumerate(pta.param_names):
self.pimap[p] = ct
self.snames = {}
for sc in pta._signalcollections:
for signal in sc._signals:
self.snames[signal.signal_name] = signal.params
def draw_from_prior(self, x, iter, beta):
"""Prior draw.
The function signature is specific to PTMCMCSampler.
"""
q = x.copy()
lqxy = 0
# randomly choose parameter
idx = np.random.randint(0, self.npar)
# if vector parameter jump in random component
param = self.params[idx]
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[param]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[idx] = param.sample()
# forward-backward jump probability
lqxy = param.get_logpdf(x[self.pmap[param]]) - param.get_logpdf(q[self.pmap[param]])
return q, float(lqxy)
def draw_from_gwb_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'red noise'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[param]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[param]] = param.sample()
# forward-backward jump probability
lqxy = param.get_logpdf(x[self.pmap[param]]) - param.get_logpdf(q[self.pmap[param]])
return q, float(lqxy)
def draw_from_bwm_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'bwm'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[param]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[param]] = param.sample()
# forward-backward jump probability
lqxy = param.get_logpdf(x[self.pmap[param]]) - param.get_logpdf(q[self.pmap[param]])
return q, float(lqxy)
def draw_from_ephem_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'phys_ephem'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
q[self.pmap[param]][idx2] = param.sample()[idx2]
# scalar parameter
else:
q[self.pmap[param]] = param.sample()
# forward-backward jump probability
lqxy = param.get_logpdf(x[self.pmap[param]]) - param.get_logpdf(q[self.pmap[param]])
return q, float(lqxy)
def draw_from_red_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'red noise'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
            q[self.pmap[param]][idx2] = param.sample()[idx2]
        # scalar parameter
        else:
            q[self.pmap[param]] = param.sample()
        # forward-backward jump probability
        lqxy = param.get_logpdf(x[self.pmap[param]]) - param.get_logpdf(q[self.pmap[param]])
return q, float(lqxy)
def draw_from_dmgp_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
signal_name = 'dm_gp'
# draw parameter from signal model
param = np.random.choice(self.snames[signal_name])
if param.size:
idx2 = np.random.randint(0, param.size)
            q[self.pmap[param]][idx2] = param.sample()[idx2]
        # scalar parameter
        else:
            q[self.pmap[param]] = param.sample()
        # forward-backward jump probability
        lqxy = param.get_logpdf(x[self.pmap[param]]) - param.get_logpdf(q[self.pmap[param]])
return q, float(lqxy)
def draw_from_dm1yr_prior(self, x, iter, beta):
q = x.copy()
lqxy = 0
dm1yr_names = [dmname for dmname in self.pnames if 'dm_s1yr' in dmname]
dmname = np.random.choice(dm1yr_names)
idx = self.pnames.index(dmname)
if 'log10_Amp' in dmname:
q[idx] = np.random.uniform(-10, -2)
elif 'phase' in dmname:
            q[idx] = np.random.uniform(0, 2*np.pi)
        # symmetric uniform draws within the prior support, so lqxy stays 0
        return q, float(lqxy)
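
# Added usage sketch (hedged): one common way to register these proposals with
# PTMCMCSampler. Constructor arguments, weights, and `x0` below are
# assumptions; check the PTMCMCSampler documentation before relying on them.
#
#     from PTMCMCSampler.PTMCMCSampler import PTSampler
#     jp = JumpProposal(pta)
#     sampler = PTSampler(jp.ndim, pta.get_lnlikelihood, pta.get_lnprior,
#                         cov=np.eye(jp.ndim) * 0.1, outDir='./chains')
#     sampler.addProposalToCycle(jp.draw_from_prior, 10)
#     sampler.sample(x0, int(1e5))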
# -*- coding: UTF-8 -*-
# File: stats.py
import numpy as np
from ..utils.segmentation.segmentation import update_confusion_matrix
from . import logger
from numpy import linalg as LA
__all__ = ['StatCounter', 'BinaryStatistics', 'RatioCounter', 'Accuracy',
           'OnlineMoments', 'MIoUStatistics', 'MIoUBoundaryStatistics']
class StatCounter(object):
""" A simple counter"""
def __init__(self):
self.reset()
def feed(self, v):
"""
Args:
v(float or np.ndarray): has to be the same shape between calls.
"""
self._values.append(v)
def reset(self):
self._values = []
@property
def count(self):
return len(self._values)
@property
def average(self):
assert len(self._values)
return np.mean(self._values)
@property
def sum(self):
assert len(self._values)
return np.sum(self._values)
@property
def max(self):
assert len(self._values)
return max(self._values)
@property
def min(self):
assert len(self._values)
return min(self._values)
class RatioCounter(object):
""" A counter to count ratio of something. """
def __init__(self):
self.reset()
def reset(self):
self._tot = 0
self._cnt = 0
def feed(self, cnt, tot=1):
"""
Args:
cnt(int): the count of some event of interest.
tot(int): the total number of events.
"""
self._tot += tot
self._cnt += cnt
@property
def ratio(self):
if self._tot == 0:
return 0
return self._cnt * 1.0 / self._tot
@property
def count(self):
"""
Returns:
int: the total
"""
return self._tot
class Accuracy(RatioCounter):
""" A RatioCounter with a fancy name """
@property
def accuracy(self):
return self.ratio
class BinaryStatistics(object):
"""
Statistics for binary decision,
including precision, recall, false positive, false negative
"""
def __init__(self):
self.reset()
def reset(self):
self.nr_pos = 0 # positive label
self.nr_neg = 0 # negative label
self.nr_pred_pos = 0
self.nr_pred_neg = 0
self.corr_pos = 0 # correct predict positive
self.corr_neg = 0 # correct predict negative
def feed(self, pred, label):
"""
Args:
pred (np.ndarray): binary array.
label (np.ndarray): binary array of the same size.
"""
assert pred.shape == label.shape, "{} != {}".format(pred.shape, label.shape)
self.nr_pos += (label == 1).sum()
self.nr_neg += (label == 0).sum()
self.nr_pred_pos += (pred == 1).sum()
self.nr_pred_neg += (pred == 0).sum()
self.corr_pos += ((pred == 1) & (pred == label)).sum()
self.corr_neg += ((pred == 0) & (pred == label)).sum()
@property
def precision(self):
if self.nr_pred_pos == 0:
return 0
return self.corr_pos * 1. / self.nr_pred_pos
@property
def recall(self):
if self.nr_pos == 0:
return 0
return self.corr_pos * 1. / self.nr_pos
@property
def false_positive(self):
if self.nr_pred_pos == 0:
return 0
return 1 - self.precision
@property
def false_negative(self):
if self.nr_pos == 0:
return 0
return 1 - self.recall
class OnlineMoments(object):
"""Compute 1st and 2nd moments online (to avoid storing all elements).
See algorithm at: https://www.wikiwand.com/en/Algorithms_for_calculating_variance#/Online_algorithm
"""
def __init__(self):
self._mean = 0
self._M2 = 0
self._n = 0
def feed(self, x):
"""
Args:
x (float or np.ndarray): must have the same shape.
"""
self._n += 1
delta = x - self._mean
self._mean += delta * (1.0 / self._n)
delta2 = x - self._mean
self._M2 += delta * delta2
@property
def mean(self):
return self._mean
@property
def variance(self):
return self._M2 / (self._n - 1)
@property
def std(self):
return np.sqrt(self.variance)
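
def _check_online_moments():
    # Self-check (added illustration, numpy only): OnlineMoments should agree
    # with the batch estimators np.mean and np.var(ddof=1) after feeding the
    # same samples one at a time.
    rng = np.random.RandomState(0)
    xs = rng.normal(loc=2.0, scale=3.0, size=1000)
    om = OnlineMoments()
    for value in xs:
        om.feed(value)
    assert np.allclose(om.mean, xs.mean())
    assert np.allclose(om.variance, xs.var(ddof=1))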
class MIoUBoundaryStatistics(object):
"""
    Boundary-aware segmentation statistics (mIoU, accuracy, mean accuracy),
    computed separately for boundary and inner pixels
"""
def __init__(self, nb_classes, ignore_label=255, kernel = 7):
self.nb_classes = nb_classes
self.ignore_label = ignore_label
self.kernel = kernel
self.reset()
def reset(self):
self._mIoU = 0
self._accuracy = 0
self._mean_accuracy = 0
self._confusion_matrix_boundary = np.zeros((self.nb_classes, self.nb_classes), dtype=np.uint64)
self._confusion_matrix_inner = np.zeros((self.nb_classes, self.nb_classes), dtype=np.uint64)
def distinguish_boundary(self,predict, label):
w, h = label.shape
r = self.kernel//2
        def is_boundary(gt, i, j):
            i_min = max(i-r, 0)
            i_max = min(i+r+1, w)
            j_min = max(j-r, 0)
            j_max = min(j+r+1, h)
            small_block = gt[i_min:i_max, j_min:j_max]
            # compare against the center pixel gt[i, j]; indexing
            # small_block[r, r] is wrong near the border, where the clipped
            # window no longer centers on (i, j)
            if LA.norm(small_block - gt[i, j], 1) == 0:  # all elements equal
                return False
            else:
                return True
mask = np.zeros((w,h),dtype=np.uint8)
for i in range(w):
for j in range(h):
mask[i, j] = is_boundary(label, i, j)
boundary_idx = np.where(mask==1)
inner_idx = np.where(mask==0)
boundary_predict = predict[boundary_idx]
inner_predict = predict[inner_idx]
boundary_label = label[boundary_idx]
inner_label = label[inner_idx]
return boundary_predict,inner_predict,boundary_label,inner_label
def feed(self, pred, label):
"""
Args:
pred (np.ndarray): binary array.
label (np.ndarray): binary array of the same size.
"""
assert pred.shape == label.shape, "{} != {}".format(pred.shape, label.shape)
boundary_predict, inner_predict, boundary_label, inner_label = self.distinguish_boundary(pred,label)
self._confusion_matrix_boundary = update_confusion_matrix(boundary_predict, boundary_label, self._confusion_matrix_boundary, self.nb_classes,
self.ignore_label)
self._confusion_matrix_inner = update_confusion_matrix(inner_predict, inner_label,
self._confusion_matrix_inner, self.nb_classes,
self.ignore_label)
@staticmethod
def mIoU(_confusion_matrix):
I = np.diag(_confusion_matrix)
U = np.sum(_confusion_matrix, axis=0) + np.sum(_confusion_matrix, axis=1) - I
assert np.min(U) > 0,"sample number is too small.."
IOU = I*1.0 / U
meanIOU = np.mean(IOU)
return meanIOU
@staticmethod
def accuracy(_confusion_matrix):
return np.sum(np.diag(_confusion_matrix))*1.0 / np.sum(_confusion_matrix)
@staticmethod
def mean_accuracy(_confusion_matrix):
assert np.min(np.sum(_confusion_matrix, axis=1)) > 0, "sample number is too small.."
return np.mean(np.diag(_confusion_matrix)*1.0 / np.sum(_confusion_matrix, axis=1))
def print_result(self):
logger.info("boundary result:")
logger.info("boundary mIoU: {}".format(self.mIoU(self._confusion_matrix_boundary)))
logger.info("boundary accuracy: {}".format(self.accuracy(self._confusion_matrix_boundary)))
logger.info("boundary mean_accuracy: {}".format(self.mean_accuracy(self._confusion_matrix_boundary)))
logger.info("inner result:")
logger.info("inner mIoU: {}".format(self.mIoU(self._confusion_matrix_inner)))
logger.info("inner accuracy: {}".format(self.accuracy(self._confusion_matrix_inner)))
logger.info("inner mean_accuracy: {}".format(self.mean_accuracy(self._confusion_matrix_inner)))
class MIoUStatistics(object):
"""
    Semantic segmentation statistics,
    including mIoU, accuracy, mean accuracy
"""
def __init__(self, nb_classes, ignore_label=255):
self.nb_classes = nb_classes
self.ignore_label = ignore_label
self.reset()
def reset(self):
self._mIoU = 0
self._accuracy = 0
self._mean_accuracy = 0
self._confusion_matrix = np.zeros((self.nb_classes, self.nb_classes), dtype=np.uint64)
def feed(self, pred, label):
"""
Args:
pred (np.ndarray): binary array.
label (np.ndarray): binary array of the same size.
"""
assert pred.shape == label.shape, "{} != {}".format(pred.shape, label.shape)
self._confusion_matrix = update_confusion_matrix(pred, label, self._confusion_matrix, self.nb_classes,
self.ignore_label)
@property
def confusion_matrix(self):
return self._confusion_matrix
@property
def confusion_matrix_beautify(self):
return np.array_str(self._confusion_matrix, precision=12, suppress_small=True)
@property
def mIoU(self):
I = np.diag(self._confusion_matrix)
U = np.sum(self._confusion_matrix, axis=0) + np.sum(self._confusion_matrix, axis=1) - I
#assert np.min(U) > 0,"sample number is too small.."
IOU = I*1.0 / U
meanIOU = np.mean(IOU)
return meanIOU
@property
def mIoU_beautify(self):
I = np.diag(self._confusion_matrix)
U = np.sum(self._confusion_matrix, axis=0) + np.sum(self._confusion_matrix, axis=1) - I
#assert np.min(U) > 0, "sample number is too small.."
IOU = I * 1.0 / U
return np.array_str(IOU, precision=5, suppress_small=True)
@property
def accuracy(self):
return np.sum(np.diag(self._confusion_matrix))*1.0 / np.sum(self._confusion_matrix)
@property
def mean_accuracy(self):
#assert np.min(np.sum(self._confusion_matrix, axis=1)) > 0, "sample number is too small.."
        return np.mean(np.diag(self._confusion_matrix)*1.0 / np.sum(self._confusion_matrix, axis=1))
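
def _miou_worked_example():
    # Worked example (added illustration): for the confusion matrix
    # [[3, 1], [2, 4]], I = diag = [3, 4], U = colsum + rowsum - I = [6, 7],
    # so the per-class IoU is [0.5, 4/7] and mIoU is their mean.
    cm = np.array([[3, 1], [2, 4]], dtype=np.float64)
    I = np.diag(cm)
    U = cm.sum(axis=0) + cm.sum(axis=1) - I
    assert np.allclose(I / U, [0.5, 4.0 / 7.0])
    assert np.isclose(np.mean(I / U), (0.5 + 4.0 / 7.0) / 2)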
from torch import nn
from torch.autograd import Variable
import torch
from torch.autograd.gradcheck import zero_gradients
from dataset import MNISTbyClass
from torch.utils.data import DataLoader
from argparse import ArgumentParser
from models import MLP_100, ConvNet, \
ConvNetRegressor, ConvConvNetRegressor, \
VAEConvRegressor, MLP_Regressor
import pickle
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier as KNN
from matplotlib import pyplot as plt
import numpy as np
import utils
plt.rcParams['image.cmap'] = 'plasma'
plt.switch_backend('agg')
models = {
'mlp': MLP_100,
'conv': ConvNet,
}
regressors = {
'convreg': ConvNetRegressor,
'convconv': ConvConvNetRegressor,
'vae': VAEConvRegressor,
'mlp': MLP_Regressor,
}
parser = ArgumentParser()
parser.add_argument('--model', choices=models.keys())
parser.add_argument('--regressor', choices=regressors.keys())
parser.add_argument('--mnist')
parser.add_argument('--extended', action='store_true')
parser.add_argument('--val_labels', nargs='+', type=int,
help='Which labels to use as validation')
parser.add_argument('--index')
parser.add_argument('--label', type=int,
help='label of current model')
parser.add_argument('--no_bias', action='store_true')
parser.add_argument('--h1', type=float, default=0.75)
parser.add_argument('--h2', type=float, default=0.5)
parser.add_argument('--model_path')
parser.add_argument('--regressor_path')
parser.add_argument('--w1_path')
parser.add_argument('--gradient', action='store_true')
parser.add_argument('--boundary', action='store_true')
parser.add_argument('--nn', action='store_true')
parser.add_argument('--save', type=str)
def follow_gradient(img, net, alpha):
boundary = []
loss = nn.BCELoss()
y = Variable(torch.FloatTensor(1, 1))
if torch.cuda.is_available():
img = img.cuda()
y = y.cuda()
x = Variable(img, requires_grad=True)
zero_gradients(x)
# get initial prediction
out = net(x)
pred = (out.data[0] > 0.5).cpu()
boundary.append((img.cpu().squeeze(0).numpy(), out.data[0, 0]))
# copy prediction into y
y.data.copy_(pred)
for _ in range(10):
error = loss(out, y)
error.backward()
gradient = torch.sign(x.grad.data)
gradient_mask = alpha * gradient
# create updated img
output = x.data + gradient_mask
output.clamp_(0., 1.)
# put step in x
x.data.copy_(output)
# repeat the process
zero_gradients(x)
out = net(x)
boundary.append((output.cpu().squeeze(0).numpy(), out.data[0, 0]))
return boundary, pred
def random_jitter(img, net, sigma):
boundary = []
noise = Variable(torch.zeros_like(img))
if torch.cuda.is_available():
img = img.cuda()
noise = noise.cuda()
x = Variable(img)
for _ in range(10):
noise.data.normal_(0, sigma)
jittered = x + noise
out = net(jittered)
boundary.append((jittered.data.cpu().squeeze(0).numpy(), out.data[0, 0]))
return boundary
def model_decision(net, args, regressed_net=None, w1_net=None):
dataset = MNISTbyClass(args.mnist, args.index,
args.label, 400,
relevant_labels=args.val_labels, train_split=False,
extended=args.extended)
dataloader = DataLoader(dataset, batch_size=1, pin_memory=True,
shuffle=False, num_workers=0)
real = []
boundary_og = []
boundary_reg = []
boundary_w1 = []
acc_og = 0
acc_reg = 0
acc_w1 = 0
total = 0
for img, label in tqdm(dataloader):
out, pred = follow_gradient(img, net, 0.1)
real.append((img.squeeze(0).numpy(), label[0]))
boundary_og += out
total += 1
acc_og += (pred.long() == label)[0]
if regressed_net is not None:
out, pred_reg = follow_gradient(img, regressed_net, 0.1)
boundary_reg += out
acc_reg += (pred_reg.long() == label)[0]
if w1_net is not None:
out, pred_w1 = follow_gradient(img, w1_net, 0.1)
boundary_w1 += out
acc_w1 += (pred_w1.long() == label)[0]
print(f'Original Accuracy: {acc_og/total:.3f}')
print(f'Regressed Accuracy: {acc_reg/total:.3f}')
print(f'W1 Accuracy: {acc_w1/total:.3f}')
return real, boundary_og, boundary_reg, boundary_w1
def viz(net, args, regressed=None, w1_net=None):
real, boundary, boundary_reg, boundary_w1 = model_decision(
net, args, regressed_net=regressed, w1_net=w1_net)
real_points = np.stack([x[0] for x in real])
real_labels = np.stack([x[1] for x in real])
bound_points = np.stack([x[0] for x in boundary])
bound_labels = np.stack([x[1] for x in boundary])
pca = PCA(n_components=2)
pca.fit(real_points)
real_points = pca.transform(real_points)
bound_points = pca.transform(bound_points)
if regressed is not None:
bound_reg_points = np.stack([x[0] for x in boundary_reg])
bound_reg_labels = np.stack([x[1] for x in boundary_reg])
bound_reg_points = pca.transform(bound_reg_points)
if w1_net is not None:
        bound_w1_points = np.stack([x[0] for x in boundary_w1])
        bound_w1_labels = np.stack([x[1] for x in boundary_w1])
        bound_w1_points = pca.transform(bound_w1_points)
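    # Hedged sketch of the plotting step the truncated source presumably
    # performs next: scatter the PCA-projected points colored by model output.
    # Variable names come from the code above; the styling is an assumption.
    fig, ax = plt.subplots()
    ax.scatter(bound_points[:, 0], bound_points[:, 1],
               c=bound_labels, s=5, alpha=0.5, label='boundary samples')
    ax.scatter(real_points[:, 0], real_points[:, 1],
               c=real_labels, s=20, marker='x', label='data')
    ax.legend()
    if args.save:
        fig.savefig(args.save)
    else:
        plt.show()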
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from libsvm.svmutil import *
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from timeit import default_timer as timer
#Reading files
data_points_train = pd.read_csv('2019MT60763.csv', header = None, nrows = 3000)
data = np.array((data_points_train.sort_values(data_points_train.columns[25])).values)
dp = np.array(data)
class_label = dp[:,25]
# count the number of occurrences of each class label
unique, counts = np.unique(class_label, return_counts=True)
dict(zip(unique, counts))
#print(counts)
# for 25 features
# FOR CLASSES {0,1}
text_x = dp[:631,:25]
text_t = dp[:631,25]
# for cross_validation
tp_x_1 = np.append(dp[:100,:25],dp[306:406,:25],axis=0)
tp_t_1 = np.append(dp[:100,25],dp[306:406,25],axis=0)
tp_x_2 = np.append(dp[100:200,:25],dp[406:506,:25],axis=0)
tp_t_2 = np.append(dp[100:200,25],dp[406:506,25],axis=0)
tp_x_3 = np.append(dp[200:306,:25],dp[506:631,:25],axis=0)
tp_t_3 = np.append(dp[200:306,25],dp[506:631,25],axis=0)
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='linear'))])
parameters = {'SVM__C':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
x = G.score(tp_x_2, tp_t_2)
x+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
x+=G.score(tp_x_3, tp_t_3)
x+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
x+=G.score(tp_x_2, tp_t_2)
x+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',x/6)
print(((svm.SVC(kernel = 'linear', C = 1)).fit(text_x,text_t)).support_)
fig = plt.figure(1)
c = np.logspace(0, 1, 10)
matrix = np.zeros((10,3))
for i in range (10):
svc = svm.SVC(kernel='linear',C = c[i])
svc.fit(text_x, text_t)
    matrix[i][0] = c[i]
matrix[i][1] = svc.score(text_x, text_t)
svc.fit(tp_x_1,tp_t_1)
x1 = svc.score(tp_x_2, tp_t_2)
x1+=svc.score(tp_x_3, tp_t_3)
svc.fit(tp_x_2,tp_t_2)
x1+=svc.score(tp_x_3, tp_t_3)
x1+=svc.score(tp_x_1, tp_t_1)
svc.fit(tp_x_3,tp_t_3)
x1+=svc.score(tp_x_2, tp_t_2)
x1+=svc.score(tp_x_1, tp_t_1)
matrix[i][2] = x1/6
plt.plot(matrix[:,0:1],matrix[:,1:2],label = 'Training score')
plt.plot(matrix[:,0:1],matrix[:,2:3],label = 'Cross-validation score')
plt.title('C vs Accuracy')
plt.xlabel('C')
plt.ylabel('Accuracy')
plt.xscale('log')
plt.legend()
plt.show()
PIPE = Pipeline([('scaler', StandardScaler()), ('SVM', svm.SVC(kernel='rbf'))])
parameters = {'SVM__C':np.logspace(0, 1, 10), 'SVM__gamma':np.logspace(0, 1, 10)}
G = GridSearchCV(PIPE, param_grid=parameters, cv=5)
G.fit(text_x, text_t)
print ('Training score',G.score(text_x, text_t))
print (G.best_params_)
G.fit(tp_x_1,tp_t_1)
y = G.score(tp_x_2, tp_t_2)
y+=G.score(tp_x_3, tp_t_3)
G.fit(tp_x_2,tp_t_2)
y+=G.score(tp_x_3, tp_t_3)
y+=G.score(tp_x_1, tp_t_1)
G.fit(tp_x_3,tp_t_3)
y+=G.score(tp_x_2, tp_t_2)
y+=G.score(tp_x_1, tp_t_1)
print('Cross_validation score',y/6)
print(((svm.SVC(kernel = 'rbf', C = 1.29,gamma = 1)).fit(text_x,text_t)).support_)
puto = np.zeros((100,1))
"""
Benchmark different solver of the same CSC univariate or multivariate problem.
This script needs the following packages:
pip install pandas pyfftw
pip install alphacsc/other/sporco
- Use bench_methods_run.py to run the benchmark.
The results are saved in alphacsc/figures.
- Use bench_methods_plot.py to plot the results.
The figures are saved in alphacsc/figures.
"""
from __future__ import print_function
import os
import time
import itertools
import numpy as np
import pandas as pd
import scipy.sparse as sp
from joblib import Parallel, delayed
import alphacsc.other.heide_csc as CSC
from sporco.admm.cbpdndl import ConvBPDNDictLearn
from alphacsc.update_d import update_d_block
from alphacsc.learn_d_z import learn_d_z
from alphacsc.learn_d_z_multi import learn_d_z_multi
from alphacsc.datasets.somato import load_data
from alphacsc.init_dict import init_dictionary
from alphacsc.utils.dictionary import get_uv
START = time.time()
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
##############################
# Parameters of the simulation
##############################
verbose = 1
# base string for the save names.
base_name = 'run_0'
# n_jobs for the parallel running of single core methods
n_jobs = 1
# number of random states
n_states = 1
# loop over parameters
n_times_atom_list = [32]
n_atoms_list = [2]
n_channel_list = [1]
reg_list = [10.]
######################################
# Functions compared in the benchmark
######################################
def run_admm(X, ds_init, reg, n_iter, random_state, label, max_it_d=10,
max_it_z=10):
# admm with the following differences
# - positivity constraints
# - different init
# - d step and z step are swapped
    tol = np.float64(1e-3)
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/book1/supplements/autodiff_jax.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="E4bE-S8yDALH"
# # Automatic differentiation using JAX
#
# In this section, we illustrate automatic differentation using JAX.
# For details, see [this video](https://www.youtube.com/watch?v=wG_nF1awSSY&t=697s) or [The Autodiff Cookbook](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html).
#
#
#
#
#
# + id="eCI0G3tfDFSs"
# Standard Python libraries
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import partial
import os
import time
import numpy as np
np.set_printoptions(precision=3)
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
from typing import Tuple, NamedTuple
from IPython import display
# %matplotlib inline
import sklearn
# + id="Z9kAsUWYDIOk" colab={"base_uri": "https://localhost:8080/"} outputId="4568e80c-e8a5-4a81-ecb0-20a2d994b0b1"
# Load JAX
import jax
import jax.numpy as jnp
from jax import random, vmap, jit, grad, value_and_grad, hessian, jacfwd, jacrev
print("jax version {}".format(jax.__version__))
# Check the jax backend
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
key = random.PRNGKey(0)
# + [markdown] id="QuMHSd3wr1xH"
# ## Derivatives
#
# We can compute $(\nabla f)(x)$ using `grad(f)(x)`. For example, consider
#
#
# $f(x) = x^3 + 2x^2 - 3x + 1$
#
# $f'(x) = 3x^2 + 4x -3$
#
# $f''(x) = 6x + 4$
#
# $f'''(x) = 6$
#
# $f^{iv}(x) = 0$
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="jYTM5MPmr3C0" outputId="4801ef39-5c62-4fd1-c021-dc2d23e03035"
f = lambda x: x**3 + 2*x**2 - 3*x + 1
dfdx = jax.grad(f)
d2fdx = jax.grad(dfdx)
d3fdx = jax.grad(d2fdx)
d4fdx = jax.grad(d3fdx)
print(dfdx(1.))
print(d2fdx(1.))
print(d3fdx(1.))
print(d4fdx(1.))
# + [markdown] id="kUj-VuSzmFaV"
# ## Partial derivatives
#
#
# $$
# \begin{align}
# f(x,y) &= x^2 + y \\
# \frac{\partial f}{\partial x} &= 2x \\
# \frac{\partial f}{\partial y} &= 1
# \end{align}
# $$
#
# + colab={"base_uri": "https://localhost:8080/"} id="c0hW7fqfmR1c" outputId="3b5bbc43-a7e3-4692-c4e8-137faa0c68eb"
def f(x,y):
return x**2 + y
# Partial derviatives
x = 2.0; y= 3.0;
v, gx = value_and_grad(f, argnums=0)(x,y)
print(v)
print(gx)
gy = grad(f, argnums=1)(x,y)
print(gy)
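# grad also accepts a tuple of argnums and then returns both partial
# derivatives at once (standard JAX API); this snippet is an added
# illustration.
gx2, gy2 = grad(f, argnums=(0, 1))(x, y)
print(gx2, gy2)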
# + [markdown] id="VTIybB8b4ar0"
# ## Gradients
# + [markdown] id="Xb0gZ_1HBEyC"
# Linear function: multi-input, scalar output.
#
# $$
# \begin{align}
# f(x; a) &= a^T x\\
# \nabla_x f(x;a) &= a
# \end{align}
# $$
# + colab={"base_uri": "https://localhost:8080/"} id="UmYqFEs04vkV" outputId="311e8ac4-3ed4-4ad6-f545-6307390d4745"
def fun1d(x):
return jnp.dot(a, x)[0]
Din = 3; Dout = 1;
a = np.random.normal(size=(Dout, Din))
x = np.random.normal(size=(Din,))
g = grad(fun1d)(x)
assert np.allclose(g, a)
# It is often useful to get the function value and gradient at the same time
val_grad_fn = jax.value_and_grad(fun1d)
v, g = val_grad_fn(x)
print(v)
print(g)
assert np.allclose(v, fun1d(x))
assert np.allclose(a, g)
# + [markdown] id="WbgiqkF6BL1E"
# Linear function: multi-input, multi-output.
#
# $$
# \begin{align}
# f(x;A) &= A x \\
# \frac{\partial f(x;A)}{\partial x} &= A
# \end{align}
# $$
# + id="s6hkEYxV5EIx"
# We construct a multi-output linear function.
# We check forward and reverse mode give same Jacobians.
def fun(x):
return jnp.dot(A, x)
Din = 3; Dout = 4;
A = np.random.normal(size=(Dout, Din))
x = np.random.normal(size=(Din,))
Jf = jacfwd(fun)(x)
Jr = jacrev(fun)(x)
assert np.allclose(Jf, Jr)
assert np.allclose(Jf, A)
# + [markdown] id="CN5d-D7XBU9Y"
# Quadratic form.
#
# $$
# \begin{align}
# f(x;A) &= x^T A x \\
# \nabla_x f(x;A) &= (A+A^T) x
# \end{align}
# $$
# + id="9URZeX8PBbhl"
D = 4
A = np.random.normal(size=(D,D))
x = np.random.normal(size=(D,))
quadfun = lambda x: jnp.dot(x, jnp.dot(A, x))
g = grad(quadfun)(x)
assert np.allclose(g, jnp.dot(A+A.T, x))
# + [markdown] id="U9ZOhDeqCXu3"
# Chain rule applied to sigmoid function.
#
# $$
# \begin{align}
# \mu(x;w) &=\sigma(w^T x) \\
# \nabla_w \mu(x;w) &= \sigma'(w^T x) x \\
# \sigma'(a) &= \sigma(a) * (1-\sigma(a))
# \end{align}
# $$
# + colab={"base_uri": "https://localhost:8080/"} id="6q5VfLXLB7rv" outputId="0773a46f-784f-4dbe-a6b7-29dd5cd26654"
D = 4
w = np.random.normal(size=(D,))
x = np.random.normal(size=(D,))
y = 0
def sigmoid(x): return 0.5 * (jnp.tanh(x / 2.) + 1)
def mu(w): return sigmoid(jnp.dot(w,x))
def deriv_mu(w): return mu(w) * (1-mu(w)) * x
deriv_mu_jax = grad(mu)
print(deriv_mu(w))
print(deriv_mu_jax(w))
assert np.allclose(deriv_mu(w), deriv_mu_jax(w), atol=1e-3)
# + [markdown] id="nglie5m7q607"
# ## Auxiliary return values
#
# A function can return its value and other auxiliary results; the latter are not differentiated.
# + colab={"base_uri": "https://localhost:8080/"} id="QHz6zrC9qVjT" outputId="76724fd1-4e1a-4737-b252-963700fcce91"
def f(x,y):
return x**2+y, 42
(v,aux), g = value_and_grad(f, has_aux=True)(x,y)
print(v)
print(aux)
print(g)
# + [markdown] id="bBMcsg4uoKua"
# ## Jacobians
#
#
# Example: Linear function: multi-input, multi-output.
#
# $$
# \begin{align}
# f(x;A) &= A x \\
# \frac{\partial f(x;A)}{\partial x} &= A
# \end{align}
# $$
#
# + id="iPilm5H3oWcy"
# We construct a multi-output linear function.
# We check forward and reverse mode give same Jacobians.
def fun(x):
return jnp.dot(A, x)
Din = 3; Dout = 4;
A = np.random.normal(size=(Dout, Din))
x = np.random.normal(size=(Din,))
Jf = jacfwd(fun)(x)
Jr = jacrev(fun)(x)
assert np.allclose(Jf, Jr)
# + [markdown] id="mg9ValMRm_Md"
# ## Hessians
#
# Quadratic form.
#
# $$
# \begin{align}
# f(x;A) &= x^T A x \\
# \nabla_x^2 f(x;A) &= A + A^T
# \end{align}
# $$
# + id="leW9lqvinDsM"
D = 4
A = np.random.normal(size=(D,D))
x = np.random.normal(size=(D,))
quadfun = lambda x: jnp.dot(x, jnp.dot(A, x))
H1 = hessian(quadfun)(x)
assert np.allclose(H1, A+A.T)
def my_hessian(fun):
return jacfwd(jacrev(fun))
H2 = my_hessian(quadfun)(x)
assert np.allclose(H1, H2)
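
# Added illustration: a Hessian-vector product without materializing the full
# Hessian, via the standard forward-over-reverse composition (jax.jvp over
# grad).
def hvp(f, x, v):
    return jax.jvp(grad(f), (x,), (v,))[1]

v = np.random.normal(size=(D,))
assert np.allclose(hvp(quadfun, x, v), jnp.dot(A + A.T, v), atol=1e-4)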
# + [markdown] id="MeoGcnV54YY9"
# ## Example: Binary logistic regression
# + id="Isql2l4MGfIt" colab={"base_uri": "https://localhost:8080/"} outputId="2465d977-66dc-4ea0-e5bd-5a05d4ad92ec"
def sigmoid(x): return 0.5 * (jnp.tanh(x / 2.) + 1)
def predict_single(w, x):
return sigmoid(jnp.dot(w, x)) # <(D) , (D)> = (1) # inner product
def predict_batch(w, X):
return sigmoid(jnp.dot(X, w)) # (N,D) * (D,1) = (N,1) # matrix-vector multiply
# negative log likelihood
def loss(weights, inputs, targets):
preds = predict_batch(weights, inputs)
logprobs = jnp.log(preds) * targets + jnp.log(1 - preds) * (1 - targets)
return -jnp.sum(logprobs)
D = 2
N = 3
w = jax.random.normal(key, shape=(D,))
X = jax.random.normal(key, shape=(N,D))
y = jax.random.choice(key, 2, shape=(N,)) # uniform binary labels
#logits = jnp.dot(X, w)
#y = jax.random.categorical(key, logits)
print(loss(w, X, y))
# Gradient function
grad_fun = grad(loss)
# Gradient of each example in the batch - 2 different ways
grad_fun_w = partial(grad_fun, w)
grads = vmap(grad_fun_w)(X,y)
print(grads)
assert grads.shape == (N,D)
grads2 = vmap(grad_fun, in_axes=(None, 0, 0))(w, X, y)
assert np.allclose(grads, grads2)
# Gradient for entire batch
grad_sum = jnp.sum(grads, axis=0)
assert grad_sum.shape == (D,)
print(grad_sum)
# + colab={"base_uri": "https://localhost:8080/"} id="G3BaHdT4Gj6W" outputId="12dc846e-dd7e-4907-ea76-73147ea6f77f"
# Textbook implementation of gradient
def NLL_grad(weights, batch):
X, y = batch
N = X.shape[0]
mu = predict_batch(weights, X)
g = jnp.sum(jnp.dot(jnp.diag(mu - y), X), axis=0)
return g
grad_sum_batch = NLL_grad(w, (X,y))
print(grad_sum_batch)
assert np.allclose(grad_sum, grad_sum_batch)
# + colab={"base_uri": "https://localhost:8080/"} id="S_4lRrHgpLbG" outputId="d47db27a-b171-4933-de08-58bc5bbe0c2f"
# We can also compute Hessians, as we illustrate below.
hessian_fun = hessian(loss)
# Hessian on one example
H0 = hessian_fun(w, X[0,:], y[0])
print('Hessian(example 0)\n{}'.format(H0))
# Hessian for batch
Hbatch = vmap(hessian_fun, in_axes=(None, 0, 0))(w, X, y)
print('Hbatch shape {}'.format(Hbatch.shape))
Hbatch_sum = jnp.sum(Hbatch, axis=0)
print('Hbatch sum\n {}'.format(Hbatch_sum))
# + id="QcJvgukUpWWE"
# Textbook implementation of Hessian
def NLL_hessian(weights, batch):
X, y = batch
mu = predict_batch(weights, X)
S = jnp.diag(mu * (1-mu))
H = jnp.dot(jnp.dot(X.T, S), X)
return H
H2 = NLL_hessian(w, (X,y) )
assert np.allclose(Hbatch_sum, H2, atol=1e-2)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # minutes spent polling so far
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of file paths in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
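
def _structtype_demo():
    # Added usage sketch: structtype grows on demand when indexed one past its
    # current length, imitating a MATLAB struct array of `var` records.
    vl = structtype()
    vl[0].name = 'time'
    vl[0].units = 'seconds since 1900-01-01'
    assert len(vl) == 1
    assert vl[0].name == 'time'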
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
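#dofst_k_oxygen carries the raw frequency output of the profiler's oxygen
#sensor (hence 'Hz'); the calibrated L2 product is dofst_k_oxygen_l2 in umol/kg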
var_list[3].units = 'dbar'
#ADCP
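#Acoustic Doppler Current Profilers: each branch maps a mooring's NSIF or MFN
#ADCP to its telemetered adcp_velocity_earth stream - time, bin depths,
#instrument attitude, and east/north/up seawater velocity in m/s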
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
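#Bio-acoustic sonar: only the time coordinate is requested from the
#zplsc_c_instrument streams, in both telemetered and recovered_host variants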
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
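#Surface wave statistics: bulk wave parameters (zero-crossing count, wave
#heights in m, periods in sec, mean direction and spread in degrees) from the
#buoy-mounted wavss_a_dcl_statistics stream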
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
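#Single-point velocity meters: BUOY and NSIF branches share one variable set -
#east/north/up velocity in m/s plus attitude, temperature, and pressure fields
#reported in scaled instrument units (deci-degrees, 0.01degC, 0.001dbar)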
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
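#Seawater pCO2: thermistor temperature (degC) and pco2_seawater (uatm) from
#the pco2w_abc_dcl_instrument streams on NSIF and MFN nodes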
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
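#Seawater pH: thermistor temperature (degC) and the dimensionless
#phsen_abcdef_ph_seawater value from NSIF and MFN nodes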
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
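#Spectral irradiance: the downwelling irradiance vector (uW cm-2 nm-1) from
#the NSIF-mounted spkir_abj_dcl_instrument stream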
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
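#Seafloor pressure: absolute pressure (dbar) and seawater temperature (degC)
#from the MFN presf_abc_dcl_tide_measurement streams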
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
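#Pumped CTDs: every branch returns the same five variables - temperature,
#practical salinity, density, pressure, and conductivity - from the
#ctdbp_cdef_dcl_instrument stream at the platform's BUOY, NSIF, or MFN node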
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
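#3-D point velocity meters on the MFN nodes: turbulent east/north/up velocity
#components in m/s plus the instrument's seawater pressure field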
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
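# NOTE: in the VEL3D branches the pressure units string '0.001dbar' means
# the stored values must be multiplied by 0.001 to obtain dbar.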
#VEL3DK - 3-D single-point velocity meter on the CE09OSPM wire-following profiler
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
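# NOTE: the wire-following-profiler CTD stream uses ctdpf_ckl_* parameter
# names in place of the ctdbp/dcl names used by the fixed-depth CTD branches
# above; the slot order (time, temp, salinity, density, pressure,
# conductivity) and the units are unchanged.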
#PCO2A - air-sea pCO2 exchange package (surface buoy)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
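# NOTE: PCO2A returns paired seawater and atmospheric pCO2 (uatm) plus the
# derived air-sea CO2 flux. A hedged post-processing sketch (not part of
# this function, assumes the arrays have been filled):
#
#   import numpy as np
#   dpco2 = var_list[1].data - var_list[2].data   # ssw minus atm, uatm
#   outgassing = np.nanmean(dpco2) > 0            # positive -> ocean is a CO2 source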
#PARAD - photosynthetically available radiation (profiler)
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
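# NOTE: PARAD pairs the PAR measurement (umol photons m-2 s-1) with
# int_ctd_pressure, the pressure interpolated from the profiler's co-located
# CTD, so each light sample can be assigned a depth.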
#OPTAA - spectral absorption and attenuation (AC-S)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
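# NOTE: the OPTAA branches above map only the time coordinate; the AC-S
# multi-wavelength absorption/attenuation spectra are not pulled into this
# flat, scalar var_list layout.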
#NUTNR - nitrate concentration (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
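# NOTE: each NUTNR branch maps both the raw nitrate_concentration and the
# salinity_corrected_nitrate product (both umol/L); when a co-located CTD
# record is available, the salinity-corrected value is generally the one to
# use. The stream name 'suna_dcl_recovered' is used here even for the
# telemetered method, matching how the stream is named in uFrame.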
#MOPAK - 3-axis motion pack (surface buoy)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
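# NOTE: as with OPTAA, the MOPAK branches expose only the time coordinate;
# the raw accelerometer/rate-gyro channels are not mapped here.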
#METBK - bulk meteorology package (surface buoy)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
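# NOTE: the METBK branches map 19 bulk-meteorology variables, including the
# magnetically corrected mean wind components at indices 4 and 5. A hedged
# post-processing sketch (not part of this function) for scalar wind speed:
#
#   import numpy as np
#   wind_speed = np.sqrt(var_list[4].data**2 + var_list[5].data**2)  # m/s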
#FLORT - three-channel fluorometer (chlorophyll-a, CDOM, backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
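# NOTE: every FLORT branch shares the same five channels: seawater
# scattering coefficient (m-1), fluorometric chlorophyll-a (ug/L),
# fluorometric CDOM (ppb), total volume scattering (m-1 sr-1), and the
# derived optical backscatter (m-1).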
#FDCHP - direct covariance flux package (surface buoy)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA - dissolved oxygen optode
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
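# NOTE: DOSTA oxygen appears twice per branch -- the corrected
# dissolved_oxygen in umol/kg and the optode concentration in umol/L;
# converting between them needs in-situ density:
# umol/L = umol/kg * rho[kg/m3] / 1000.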
#ADCP - acoustic Doppler current profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
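# NOTE: the adcp_velocity_earth streams are profile data -- bin_depths and
# the three seawater velocity components are 2-D (time x depth bin) -- while
# heading, pitch, and roll are 1-D attitude records in deci-degrees
# (divide by 10 for degrees).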
#WAVSS - surface wave spectra statistics (surface buoy)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
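# NOTE: the WAVSS statistics branches mix units by quantity -- wave heights
# in m, periods in sec, directions/spread in degrees -- and
# number_zero_crossings is a dimensionless wave count for the burst.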
#VELPT - single-point velocity meter (Nortek Aquadopp)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
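#PCO2W branches (Sunburst SAMI-pCO2): each entry maps the recovered-host stream
#to three variables -- time, thermistor temperature (degC), and seawater pCO2
#(uatm). NSIF entries live on RID16; seafloor (MFN) entries on MFD35.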
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
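#PHSEN branches (Sunburst SAMI-pH): time, thermistor temperature (degC), and
#seawater pH ('unitless') from the phsen_abcdef dcl recovered-host streams, for
#both NSIF (RID16/RID26) and MFN (MFD35) nodes.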
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
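#SPKIR branches (Satlantic OCR-507 multispectral radiometer): time plus the
#multichannel downwelling irradiance vector in uW cm-2 nm-1; NSIF nodes only.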
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
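#PRESF branches (Sea-Bird SBE 26plus tide gauge): absolute seafloor pressure
#(dbar) and seawater temperature (degC) from the dcl tide-measurement streams;
#deployed on the MFN nodes only.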
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
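#CTDBP branches, recovered-host method (Sea-Bird 16plus CTD): temperature,
#practical salinity, density, pressure, and conductivity. ISSM platforms carry
#CTDBPs on NSIF (RID16), MFN (MFD37), and BUOY (SBD17) nodes; the SHSM/OSSM
#platforms on NSIF (RID27) and MFN (MFD37).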
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
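#VEL3D branches (Nortek Vector point velocimeter): eastward/northward/upward
#turbulent velocity components (m/s) plus pressure, from the MFN nodes (MFD35).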
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
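#PCO2A branches (Pro-Oceanus air-sea pCO2): surface-buoy (SBD12) entries with
#seawater pCO2 and atmospheric pCO2 (both uatm) plus the derived CO2 flux
#(mol m-2 s-1).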
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
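#OPTAA branches (WET Labs AC-S absorption/attenuation meter): only the time
#coordinate is mapped here; the multi-wavelength optical arrays are not part of
#this lookup table.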
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
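#NUTNR branches (Satlantic SUNA nitrate sensor, per the suna_dcl_recovered
#stream): raw and salinity-corrected nitrate concentration in umol/L; NSIF
#nodes only.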
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
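#CTDBP (RecoveredInst): same platforms and variables as the recovered-host CTD
#branches above, but sourced from the instrument's own recovered record
#(ctdbp_cdef_instrument_recovered), where the variable names carry a ctdbp_
#prefix. The CE09OSPM profiler entry (RecoveredWFP) closes out this group.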
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
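#ADCP (RecoveredInst): bin depths, attitude (heading/pitch/roll), and
#earth-referenced velocity components from adcp_velocity_earth; NSIF entries
#on RID26, MFN entries on MFD35.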
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
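# ADCP earth-coordinate velocity on the seafloor multi-function nodes (MFN, RecoveredInst)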
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
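# ZPLSC bio-acoustic sonar echograms (MFN, RecoveredInst); these branches carry a time record only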
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
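# VELPT single-point velocity meters (BUOY and NSIF, RecoveredInst).
# The raw Aquadopp fields are scaled integers, hence unit strings such as
# 'deci-degrees' and '0.01degC'; the '0.001dbar' string appears to reflect the
# instrument's native pressure resolution and is kept as-is alongside the
# upstream 'pressure_mbar' parameter name.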
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
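# VEL3D three-axis velocity meters: VEL3D-K on the wire-following profiler
# (attitude units 'ddegrees', i.e. deci-degrees, per the upstream naming) and
# VEL3D-C/D turbulent velocities on the MFNs.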
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
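# PRESF seafloor pressure (tide) recorders (MFN, RecoveredInst)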
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
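# PHSEN seawater pH sensors (NSIF and MFN, RecoveredInst)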
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
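# PCO2W seawater pCO2 sensors (NSIF and MFN, RecoveredInst)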
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
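# PARAD photosynthetically available radiation on the wire-following profiler.
# The double underscore in 'parad_k__stc_imodem_...' matches the upstream OOI
# stream name and is kept verbatim.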
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
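# NUTNR (SUNA) nitrate sensors (NSIF, RecoveredInst)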
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
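# FDCHP direct-covariance flux package (BUOY, RecoveredInst); time record only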
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
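# FLORT fluorometer / optical-backscatter triplets (buoys and the wire-following profiler)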
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
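# Dissolved oxygen: DOFST-K on the wire-following profiler (raw output in Hz),
# DOSTA via the co-located CTDBP recovered streams on the NSIFs and MFNs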
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
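# Wave statistics (WAVSS_Stats) mapped to the ADCPT-M log9 recovered stream (MFN)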
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
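# Cabled benthic experiment packages (BEP) at CE02SHBP and CE04OSBP, streamed data.
# Hypothetical usage sketch (names taken from this function's own dispatch
# variables; shown for illustration only):
#   platform_name, node = 'CE02SHBP', 'BEP'
#   instrument_class, method = 'CTD', 'Streamed'
#   # -> selects 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample' and
#   # preallocates an empty np.array([]) for each listed variable.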
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
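#CSPP DOSTA (dissolved oxygen) data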
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
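#CSPP CTD data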
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
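#CSPP PARAD (photosynthetically available radiation) data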
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
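#CSPP NUTNR (nitrate) data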
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
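#CSPP SPKIR (spectral irradiance) data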
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
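#CSPP VELPT (point velocity meter) data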
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
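#CSPP OPTAA (optical absorption and attenuation) data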
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
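#Shelf surface-piercing profiler (CE02SHSP and CE07SHSP) data below
#CSPP FLORT (fluorometer) data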
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
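#CSPP DOSTA (dissolved oxygen) data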
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
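#CSPP CTD data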
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
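#CSPP PARAD (photosynthetically available radiation) data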
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
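#CSPP NUTNR (nitrate) data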
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
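#CSPP SPKIR (spectral irradiance) data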
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
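#CSPP VELPT (point velocity meter) data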
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
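#CSPP OPTAA (optical absorption and attenuation) data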
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
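#Coastal Endurance glider (CE05MOAS) data below
#Glider CTD data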
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
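#Glider DOSTA (dissolved oxygen) data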
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
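    # ------------------------------------------------------------------
    # FLORT (fluorometer / optical backscatter) glider branches,
    # CE05MOAS GL386-GL247, Telemetered and RecoveredHost. Both methods
    # read the flort_m_sample stream: scattering coefficient (m-1),
    # chlorophyll (ug/L), CDOM (ppb), backscatter (m-1 sr-1 and m-1),
    # interpolated CTD pressure (dbar), and position.
    # ------------------------------------------------------------------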
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
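    # ------------------------------------------------------------------
    # PARAD (photosynthetically active radiation) glider branches,
    # CE05MOAS GL386-GL247, Telemetered and RecoveredHost. Each returns
    # time, PAR (umol photons m-2 s-1), interpolated CTD pressure
    # (dbar), and position.
    # ------------------------------------------------------------------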
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
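# ---------------------------------------------------#
# Glider-mounted ADCP streams (recovered host):
# depth-binned eastward/northward/upward seawater
# velocity, with vehicle heading/pitch/roll,
# interpolated CTD pressure, and GPS position.
# ---------------------------------------------------#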
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
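# ---------------------------------------------------#
# METBK1-hr: hourly bulk-flux products from the
# surface-buoy bulk meteorology package.
# 'met_timeflx' is the hourly flux timestamp; the
# remaining variables are derived air-sea fluxes
# (W/m2, N/m2), rain rate and freshwater flux
# (mm/hr), specific humidity, stability, air and
# skin temperatures, and 10 m wind speed.
# ---------------------------------------------------#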
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
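# ---------------------------------------------------#
# WAVSS_MeanDir: mean directional wave spectra from
# the surface-buoy wave sensor, including per-band
# PSD, mean direction, directional spread, and the
# magnetically corrected direction products.
# ---------------------------------------------------#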
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
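# ---------------------------------------------------#
# WAVSS_NonDir: non-directional wave spectra, i.e.
# per-band power spectral density (m2 Hz-1) over a
# frequency axis defined by the initial frequency
# and the frequency spacing.
# ---------------------------------------------------#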
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
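# ---------------------------------------------------#
# WAVSS_Motion: raw buoy-motion time series, namely
# heave/north/east displacement offsets (m) sampled
# at 'time_spacing' intervals, plus the magnetically
# corrected x/y buoy-motion products.
# ---------------------------------------------------#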
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
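#Coastal Endurance Cabled Profiler (CE04OSPS/CE04OSPD) Data Streams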
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
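#dissolved oxygen for this profiler is served from the SBE43 on the co-located CTD stream (2A-CTDPFA107) rather than a separate DOSTA stream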
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
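#note: heading/roll/pitch are reported in deci-degrees, temperature in 0.01 degC, and pressure in 0.001 dbar (see the units assignments below)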
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01 degC'
var_list[8].units = '0.001 dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
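#the deep profiler ACM reports separate upward velocities for ascending and descending profiles (variables 3 and 4 below)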
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
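#CE04OSPS 200m Platform (PC01B) Data Streams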
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #this pressure field also applies when requesting the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
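#A minimal consumption sketch (hypothetical; not part of this module). Once a
#branch above has selected uframe_dataset_name and filled in the .name/.units
#fields, the empty .data arrays are the placeholders to be populated from that
#dataset, after which the entries can be iterated, e.g.:
#    for v in var_list:
#        if v.name:
#            print(v.name, v.data.shape, v.units)
#The attribute names (.name, .data, .units) are the ones assigned throughout
#this dispatch table.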
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
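#METBK bulk meteorology: magnetically corrected wind components plus minute-averaged flux products (met_*flx_minute, met_netlirr_minute)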
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS Data Streams (CP01CNSM)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A - Telemetered
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A - RecoveredHost
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
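#METBK1-hr (hourly METBK flux products, SBD11)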
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
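#METBK2-hr (second METBK on CP01CNSM, SBD12)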
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
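#CTDBP (NSIF and MFN CTDs)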
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
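#OPTAA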
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
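#VELPT (raw Aquadopp output: deci-degrees, centidegrees, mbar)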
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
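#FLORT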
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
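#SPKIR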
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
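#DOSTA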
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
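#Coastal Pioneer Surface Mooring NSIF pH (PHSEN)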
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
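#Coastal Pioneer Multi-Function Node (MFN) pH (PHSEN)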
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
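#Coastal Pioneer MFN pCO2 Water (PCO2W)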
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
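#Coastal Pioneer MFN Seafloor Pressure (PRESF)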
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
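#Coastal Pioneer MFN Single-Point Velocity (VELPT)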
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
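#Coastal Pioneer MFN Dissolved Oxygen (DOSTA)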
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
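#Coastal Pioneer MFN Bio-acoustic Sonar (ZPLSC)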
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
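#Coastal Pioneer MFN Velocity Profiler (ADCP)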
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP)
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
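#CP01CNPM Profiler Mooring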
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
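# Telemetered VEL3D data come from the vel3d_k_wfp_stc_instrument stream;
# the recovered-WFP branch below uses vel3d_k_wfp_instrument, without the
# '_stc' tag. Variable names and units are identical in both.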
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
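# Note: the stream name 'parad_k__stc_imodem_instrument' contains a double
# underscore; this spelling is used consistently across all PARAD branches.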
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
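# ---- CP02PMCI: buoy-mounted MOPAK plus wire-following profiler
# instruments (FLORT, DOSTA, VEL3D, CTD, PARAD) ----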
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
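# ---- CP02PMCO: buoy-mounted MOPAK plus wire-following profiler
# instruments (FLORT, DOSTA, VEL3D, CTD, PARAD) ----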
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
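# ---- CP02PMUI: buoy-mounted MOPAK plus wire-following profiler
# instruments (FLORT, DOSTA, VEL3D, CTD, PARAD) ----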
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
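# ---- CP02PMUO: buoy-mounted MOPAK plus wire-following profiler
# instruments (FLORT, DOSTA, VEL3D, CTD, PARAD) ----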
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
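# ---- CP03ISPM: buoy-mounted MOPAK plus wire-following profiler
# instruments (FLORT, DOSTA, VEL3D, CTD, PARAD) ----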
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
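# ---- Riser-mounted ADCPs on the profiler moorings (node 'RISER').
# Each platform exposes three methods: recovered_inst
# (adcp_velocity_earth), recovered_host, and telemetered
# (adcps_jln_stc_instrument streams). ----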
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
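# CP04OSPM carries an ADCPSL (02-ADCPSL010); the other profiler moorings
# below use an ADCPTG (02-ADCPTG010).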
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
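# Host-recorded and telemetered ADCP branches use the adcps_jln_* variable
# names with 'cdegree' attitude units, whereas recovered_inst uses plain
# names ('heading', 'pitch', 'roll') in 'deci-degrees'.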
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
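# CP02PMUI riser ADCP (02-ADCPTG010).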
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
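# CP02PMUO riser ADCP; note the different sensor designator (02-ADCPSL010).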
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
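# CP03ISPM riser ADCP (02-ADCPTG010).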
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
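# --------------------------------------------------------------------------#
# CP05MOAS glider streams. Each glider below (GL336, GL388, GL335, GL339,
# GL340, GL374) exposes the same sensor suite with the same variable layout:
# CTD (03-CTDGVM000), DOSTA oxygen (04-DOSTAM000), FLORT fluorometer/
# backscatter (02-FLORTM000), PARAD PAR (05-PARADM000), and ADCP
# (01-ADCPAM000). CTD/DOSTA/FLORT/PARAD come in telemetered and
# recovered_host flavors; the ADCP stream is recovered_host only.
#
# The repetition suggests a table-driven refactor. A minimal commented-out
# sketch, assuming var_list entries expose .name/.units/.data as used
# throughout this function (GLIDER_CTD_VARS is illustrative, not part of
# this module):
#
#     GLIDER_CTD_VARS = [
#         ('time', 'seconds since 1900-01-01'),
#         ('sci_water_temp', 'degC'),
#         ('practical_salinity', 'unitless'),
#         ('sci_seawater_density', 'kg/m3'),
#         ('sci_water_pressure_dbar', 'dbar'),
#         ('sci_water_cond', 'S/m'),
#         ('lat', 'degree_north'),
#         ('lon', 'degree_east'),
#     ]
#     for var, (name, units) in zip(var_list, GLIDER_CTD_VARS):
#         var.name, var.units, var.data = name, units, np.array([])
# --------------------------------------------------------------------------#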
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
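# Glider GL388 (same stream set and layout as GL336 above).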
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
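# Glider GL335.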
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
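# Glider GL339.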
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
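# Glider GL340.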
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
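# Glider GL374.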
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
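# The blocks below repeat the same instrument suite (CTD, DOSTA, FLORT, PARAD,
# ADCP) for the remaining Coastal Pioneer gliders GL375, GL376, and GL379; only
# the platform name and the glider segment of the uFrame dataset path change.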
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Core module of the pulse drawer.
This module provides the `DrawerCanvas`, which is a collection of `Chart` objects.
The `Chart` object is a collection of drawings. A user can assign multiple channels
to a single chart instance. For example, we can define a chart for specific qubit
and assign all related channels to the chart. This chart-channel mapping is defined by
the function specified by ``layout.chart_channel_map`` of the stylesheet.
Because this chart instance is decoupled from the coordinate system of the plotter,
we can arbitrarily place charts on the plotter canvas; for example, to create a 3D plot,
each chart may be placed on the X-Z plane and charts are arranged along the Y-axis.
Thus this data model maximizes the flexibility to generate an output image.
The chart instance is not just a container of drawings, as it also performs
data processing like binding abstract coordinates and truncating long pulses for an axis break.
Each chart object has `.parent` which points to the `DrawerCanvas` instance so that
each child chart can refer to the global figure settings such as time range and axis break.
Initialization
~~~~~~~~~~~~~~
The `DrawerCanvas` and `Chart` are not exposed to users as they are implicitly
initialized in the interface function. It is noteworthy that the data canvas is agnostic
to plotters. This means once the canvas instance is initialized we can reuse this data
among multiple plotters. The canvas is initialized with a stylesheet and quantum backend
information :py:class:`~qiskit.visualization.pulse_v2.device_info.DrawerBackendInfo`.
Chart instances are automatically generated when a pulse program is loaded.
```python
canvas = DrawerCanvas(stylesheet=stylesheet, device=device)
canvas.load_program(sched)
canvas.update()
```
Once all properties are set, the `.update` method is called to apply changes to drawings.
If the `DrawerCanvas` is initialized without backend information, the output shows
the time in units of the system cycle time `dt` and the frequencies are initialized to zero.
Update
~~~~~~
To update the image, a user can set new values on the canvas and then call the `.update` method.
```python
canvas.set_time_range(2000, 3000, seconds=False)
canvas.update()
```
All stored drawings are updated accordingly. The plotter API can access
drawings through the `.collections` property of a chart instance, which returns
an iterator of drawings with their unique data keys.
If a plotter provides an object handler for plotted shapes, the plotter API can manage
the lookup table between handlers and drawings by using this data key.
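A minimal plotter-side loop over the collections might look like the sketch
below; ``handler_table`` and ``my_plot_function`` are placeholders for
plotter-specific objects, not names defined by this module:
```python
for chart in canvas.charts:
    for data_key, drawing in chart.collections:
        handler_table[data_key] = my_plot_function(drawing)
```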
"""
from copy import deepcopy
from enum import Enum
from functools import partial
from itertools import chain
from typing import Union, List, Tuple, Iterator, Optional
import numpy as np
from qiskit import pulse
from qiskit.pulse.transforms import target_qobj_transform
from qiskit.visualization.exceptions import VisualizationError
from qiskit.visualization.pulse_v2 import events, types, drawings, device_info
from qiskit.visualization.pulse_v2.stylesheet import QiskitPulseStyle
class DrawerCanvas:
"""Collection of `Chart` and configuration data.
Pulse channels are associated with some `Chart` instance, and
drawing data objects are stored in that `Chart` instance.
Device, stylesheet, and some user generators are stored in the `DrawerCanvas`
and `Chart` instances are also attached to the `DrawerCanvas` as children.
Global configurations are accessed by those children to modify
the appearance of the `Chart` output.
"""
def __init__(self,
stylesheet: QiskitPulseStyle,
device: device_info.DrawerBackendInfo):
"""Create new data container with backend system information.
Args:
stylesheet: Stylesheet to decide appearance of output image.
device: Backend information to run the program.
"""
# stylesheet
self.formatter = stylesheet.formatter
self.generator = stylesheet.generator
self.layout = stylesheet.layout
# device info
self.device = device
# chart
self.global_charts = Chart(parent=self, name='global')
self.charts = []
# visible controls
self.disable_chans = set()
self.disable_types = set()
# data scaling
self.chan_scales = dict()
# global time
self._time_range = (0, 0)
self._time_breaks = []
# title
self.fig_title = ''
@property
def time_range(self) -> Tuple[int, int]:
"""Return current time range to draw.
Calculate net duration and add side margin to edge location.
Returns:
Time window considering side margin.
"""
t0, t1 = self._time_range
total_time_elimination = 0
for t0b, t1b in self.time_breaks:
if t1b > t0 and t0b < t1:
total_time_elimination += t1b - t0b
net_duration = t1 - t0 - total_time_elimination
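# Pad both edges in proportion to the net (post-break) duration, so the margin
# tracks the amount of time that is actually visible.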
new_t0 = t0 - net_duration * self.formatter['margin.left_percent']
new_t1 = t1 + net_duration * self.formatter['margin.right_percent']
return new_t0, new_t1
@time_range.setter
def time_range(self, new_range: Tuple[int, int]):
"""Update time range to draw."""
self._time_range = new_range
@property
def time_breaks(self) -> List[Tuple[int, int]]:
"""Return time breaks with time range.
If an edge of time range is in the axis break period,
the axis break period is recalculated.
Raises:
VisualizationError: When axis break is greater than time window.
Returns:
List of axis break periods considering the time window edges.
"""
t0, t1 = self._time_range
axis_breaks = []
for t0b, t1b in self._time_breaks:
if t0b >= t1 or t1b <= t0:
# skip because break period is outside of time window
continue
if t0b < t0 and t1b > t1:
raise VisualizationError('Axis break is greater than time window. '
'Nothing will be drawn.')
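# A window edge falls inside a break period: shrink the break so that roughly
# half of `axis_break.max_length` remains visible next to the edge.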
if t0b < t0 < t1b:
if t1b - t0 > self.formatter['axis_break.length']:
new_t0 = t0 + 0.5 * self.formatter['axis_break.max_length']
axis_breaks.append((new_t0, t1b))
continue
if t0b < t1 < t1b:
if t1 - t0b > self.formatter['axis_break.length']:
new_t1 = t1 - 0.5 * self.formatter['axis_break.max_length']
axis_breaks.append((t0b, new_t1))
continue
axis_breaks.append((t0b, t1b))
return axis_breaks
@time_breaks.setter
def time_breaks(self, new_breaks: List[Tuple[int, int]]):
"""Set new time breaks."""
self._time_breaks = sorted(new_breaks, key=lambda x: x[0])
def load_program(self, program: Union[pulse.Waveform, pulse.ParametricPulse, pulse.Schedule, pulse.ScheduleBlock]):
"""Load a program to draw.
Args:
program: `Waveform`, `ParametricPulse`, `Schedule`, or `ScheduleBlock` to draw.
Raises:
VisualizationError: When input program is invalid data format.
"""
if isinstance(program, (pulse.Schedule, pulse.ScheduleBlock)):
self._schedule_loader(program)
elif isinstance(program, (pulse.Waveform, pulse.ParametricPulse)):
self._waveform_loader(program)
else:
raise VisualizationError('Data type %s is not supported.' % type(program))
# update time range
self.set_time_range(0, program.duration, seconds=False)
# set title
self.fig_title = self.layout['figure_title'](program=program, device=self.device)
def _waveform_loader(self, program: Union[pulse.Waveform, pulse.ParametricPulse]):
"""Load Waveform instance.
This function is a subroutine of :py:meth:`load_program`.
Args:
program: `Waveform` to draw.
"""
chart = Chart(parent=self)
# add waveform data
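# A standalone waveform carries no channel, so wrap it in a Play instruction on
# a placeholder WaveformChannel to reuse the standard waveform generators.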
fake_inst = pulse.Play(program, types.WaveformChannel())
inst_data = types.PulseInstruction(t0=0,
dt=self.device.dt,
frame=types.PhaseFreqTuple(phase=0, freq=0),
inst=fake_inst,
is_opaque=program.is_parameterized())
for gen in self.generator['waveform']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(inst_data):
chart.add_data(data)
self.charts.append(chart)
def _schedule_loader(self, program: Union[pulse.Schedule, pulse.ScheduleBlock]):
"""Load Schedule instance.
This function is a subroutine of :py:meth:`load_program`.
Args:
program: `Schedule` to draw.
"""
program = target_qobj_transform(program, remove_directives=False)
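# Flatten the input into a plain schedule (inlining nested structure) while
# keeping directives such as barriers, which are needed for drawing.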
# initialize scale values
self.chan_scales = {}
for chan in program.channels:
if isinstance(chan, pulse.channels.DriveChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.drive']
elif isinstance(chan, pulse.channels.MeasureChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.measure']
elif isinstance(chan, pulse.channels.ControlChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.control']
elif isinstance(chan, pulse.channels.AcquireChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.acquire']
else:
self.chan_scales[chan] = 1.0
# create charts
mapper = self.layout['chart_channel_map']
for name, chans in mapper(channels=program.channels,
formatter=self.formatter,
device=self.device):
chart = Chart(parent=self, name=name)
# add standard pulse instructions
for chan in chans:
chart.load_program(program=program, chan=chan)
# add barriers
barrier_sched = program.filter(instruction_types=[pulse.instructions.RelativeBarrier],
channels=chans)
for t0, _ in barrier_sched.instructions:
inst_data = types.BarrierInstruction(t0, self.device.dt, chans)
for gen in self.generator['barrier']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(inst_data):
chart.add_data(data)
# add chart axis
chart_axis = types.ChartAxis(name=chart.name, channels=chart.channels)
for gen in self.generator['chart']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(chart_axis):
chart.add_data(data)
self.charts.append(chart)
# add snapshot data to global
snapshot_sched = program.filter(instruction_types=[pulse.instructions.Snapshot])
for t0, inst in snapshot_sched.instructions:
inst_data = types.SnapshotInstruction(t0, self.device.dt, inst.label, inst.channels)
for gen in self.generator['snapshot']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(inst_data):
self.global_charts.add_data(data)
# calculate axis break
self.time_breaks = self._calculate_axis_break(program)
def _calculate_axis_break(self, program: pulse.Schedule) -> List[Tuple[int, int]]:
"""A helper function to calculate axis break of long pulse sequence.
Args:
program: A schedule to calculate axis break.
Returns:
List of axis break periods.
"""
axis_breaks = []
edges = set()
for t0, t1 in chain.from_iterable(program.timeslots.values()):
if t1 - t0 > 0:
edges.add(t0)
edges.add(t1)
edges = sorted(edges)
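# Insert a break wherever the gap between consecutive instruction edges exceeds
# the configured threshold, keeping half of `axis_break.max_length` on each
# side of the gap.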
for t0, t1 in zip(edges[:-1], edges[1:]):
if t1 - t0 > self.formatter['axis_break.length']:
t_l = t0 + 0.5 * self.formatter['axis_break.max_length']
t_r = t1 - 0.5 * self.formatter['axis_break.max_length']
axis_breaks.append((t_l, t_r))
return axis_breaks
def set_time_range(self,
t_start: Union[int, float],
t_end: Union[int, float],
seconds: bool = True):
"""Set time range to draw.
All child chart instances are updated when time range is updated.
Args:
t_start: Left boundary of drawing in units of cycle time or real time.
t_end: Right boundary of drawing in units of cycle time or real time.
seconds: Set `True` if times are given in SI unit rather than dt.
Raises:
VisualizationError: When times are given in float without specifying dt.
"""
# convert into nearest cycle time
if seconds:
if self.device.dt is not None:
t_start = int(np.round(t_start / self.device.dt))
t_end = int(np.round(t_end / self.device.dt))
else:
raise VisualizationError('Setting time range with SI units requires '
'backend `dt` information.')
self.time_range = (t_start, t_end)
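# Example (illustrative values only): with a backend cycle time dt of 0.222 ns,
#   canvas.set_time_range(1.0e-7, 2.0e-7)           # seconds, converted via dt
#   canvas.set_time_range(450, 900, seconds=False)  # already in units of dt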
def set_disable_channel(self,
channel: pulse.channels.Channel,
remove: bool = True):
"""Interface method to control visibility of pulse channels.
Objects placed in the blocked list will not be shown.
Args:
channel: A pulse channel object to disable.
remove: Set `True` to disable, set `False` to enable.
"""
if remove:
self.disable_chans.add(channel)
else:
self.disable_chans.discard(channel)
def set_disable_type(self,
data_type: types.DataTypes,
remove: bool = True):
"""Interface method to control visibility of data types.
Objects placed in the blocked list will not be shown.
Args:
data_type: A drawing data type to disable.
remove: Set `True` to disable, set `False` to enable.
"""
if isinstance(data_type, Enum):
data_type_str = str(data_type.value)
else:
data_type_str = data_type
if remove:
self.disable_types.add(data_type_str)
else:
self.disable_types.discard(data_type_str)
def update(self):
"""Update all associated charts and generate actual drawing data from template object.
This method should be called before the canvas is passed to the plotter.
"""
for chart in self.charts:
chart.update()
class Chart:
"""A collection of drawing to be shown on the same line.
Multiple pulse channels can be assigned to a single `Chart`.
The parent `DrawerCanvas` should be specified to refer to the current user preference.
The vertical value of each `Chart` should be in the range [-1, 1].
This truncation should be performed in the plotter interface.
"""
# unique index of chart
chart_index = 0
# list of waveform type names
waveform_types = [str(types.WaveformType.REAL.value),
str(types.WaveformType.IMAG.value),
str(types.WaveformType.OPAQUE.value)]
def __init__(self, parent: DrawerCanvas, name: Optional[str] = None):
"""Create new chart.
Args:
parent: `DrawerCanvas` that this `Chart` instance belongs to.
name: Name of this `Chart` instance.
"""
self.parent = parent
# data stored in this channel
self._collections = dict()
self._output_dataset = dict()
# channel metadata
self.index = self._cls_index()
self.name = name or ''
self._channels = set()
# vertical axis information
self.vmax = 0
self.vmin = 0
self.scale = 1.0
self._increment_cls_index()
def add_data(self, data: drawings.ElementaryData):
"""Add drawing to collections.
If the given object already exists in the collections,
this interface replaces the old object instead of adding a new entry.
Args:
data: New drawing to add.
"""
self._collections[data.data_key] = data
def load_program(self,
program: pulse.Schedule,
chan: pulse.channels.Channel):
"""Load pulse schedule.
This method internally generates `ChannelEvents` to parse the program
for the specified pulse channel. This method is called once
Args:
program: Pulse schedule to load.
chan: A pulse channel associated with this instance.
"""
chan_events = events.ChannelEvents.load_program(program, chan)
chan_events.set_config(dt=self.parent.device.dt,
init_frequency=self.parent.device.get_channel_frequency(chan),
init_phase=0)
# create objects associated with waveform
for gen in self.parent.generator['waveform']:
waveforms = chan_events.get_waveforms()
obj_generator = partial(gen,
formatter=self.parent.formatter,
device=self.parent.device)
drawing_items = [obj_generator(waveform) for waveform in waveforms]
for drawing_item in list(chain.from_iterable(drawing_items)):
self.add_data(drawing_item)
# create objects associated with frame change
for gen in self.parent.generator['frame']:
frames = chan_events.get_frame_changes()
obj_generator = partial(gen,
formatter=self.parent.formatter,
device=self.parent.device)
drawing_items = [obj_generator(frame) for frame in frames]
for drawing_item in list(chain.from_iterable(drawing_items)):
self.add_data(drawing_item)
self._channels.add(chan)
def update(self):
"""Update vertical data range and scaling factor of this chart.
Those parameters are updated based on current time range in the parent canvas.
"""
self._output_dataset.clear()
self.vmax = 0
self.vmin = 0
# waveform
for key, data in self._collections.items():
if data.data_type not in Chart.waveform_types:
continue
# truncate, assume no abstract coordinate in waveform sample
trunc_x, trunc_y = self._truncate_data(data)
# no available data points
if trunc_x.size == 0 or trunc_y.size == 0:
continue
# update y range
scale = min(self.parent.chan_scales.get(chan, 1.0) for chan in data.channels)
self.vmax = max(scale * np.max(trunc_y), self.vmax)
self.vmin = min(scale * np.min(trunc_y), self.vmin)
# generate new data
new_data = deepcopy(data)
new_data.xvals = trunc_x
new_data.yvals = trunc_y
self._output_dataset[key] = new_data
# calculate chart level scaling factor
if self.parent.formatter['control.auto_chart_scaling']:
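# Map the largest waveform amplitude to roughly unit height, capped by
# `general.max_scale` so nearly-empty channels are not blown up.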
max_val = max(abs(self.vmax),
abs(self.vmin),
self.parent.formatter['general.vertical_resolution'])
self.scale = min(1.0 / max_val, self.parent.formatter['general.max_scale'])
else:
self.scale = 1.0
# update vertical range with scaling and limitation
self.vmax = max(self.scale * self.vmax,
self.parent.formatter['channel_scaling.pos_spacing'])
self.vmin = min(self.scale * self.vmin,
self.parent.formatter['channel_scaling.neg_spacing'])
# other data
for key, data in self._collections.items():
if data.data_type in Chart.waveform_types:
continue
# truncate
trunc_x, trunc_y = self._truncate_data(data)
# no available data points
if trunc_x.size == 0 or trunc_y.size == 0:
continue
# generate new data
new_data = deepcopy(data)
new_data.xvals = trunc_x
new_data.yvals = trunc_y
self._output_dataset[key] = new_data
@property
def is_active(self) -> bool:
"""Check if there is any active waveform data in this entry.
Returns:
Return `True` if there is any visible waveform in this chart.
"""
for data in self._output_dataset.values():
if data.data_type in Chart.waveform_types and self._check_visible(data):
return True
return False
@property
def collections(self) -> Iterator[Tuple[str, drawings.ElementaryData]]:
"""Return currently active entries from drawing data collection.
Each object is returned with a unique name that can be used as the key of an object handler.
When the horizontal coordinate contains `AbstractCoordinate`,
the value is substituted by current time range preference.
"""
for name, data in self._output_dataset.items():
# prepare unique name
unique_id = 'chart{ind:d}_{key}'.format(ind=self.index, key=name)
if self._check_visible(data):
yield unique_id, data
@property
def channels(self) -> List[pulse.channels.Channel]:
"""Return a list of channels associated with this chart.
Returns:
List of channels associated with this chart.
"""
return list(self._channels)
def _truncate_data(self,
data: drawings.ElementaryData) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to truncate drawings according to time breaks.
# TODO: move this function to common module to support axis break for timeline.
Args:
data: Drawing object to truncate.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
xvals = self._bind_coordinate(data.xvals)
yvals = self._bind_coordinate(data.yvals)
if isinstance(data, drawings.BoxData):
# truncate box data; these objects don't require interpolation at an axis break.
return self._truncate_boxes(xvals, yvals)
elif data.data_type in [types.LabelType.PULSE_NAME, types.LabelType.OPAQUE_BOXTEXT]:
# truncate pulse labels; these objects are not removed by truncation.
return self._truncate_pulse_labels(xvals, yvals)
else:
# other objects
return self._truncate_vectors(xvals, yvals)
def _truncate_pulse_labels(self,
xvals: np.ndarray,
yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to remove text according to time breaks.
Args:
xvals: Time points.
yvals: Data points.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
xpos = xvals[0]
t0, t1 = self.parent.time_range
if xpos < t0 or xpos > t1:
return np.array([]), np.array([])
offset_accumulation = 0
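# Shift the label left by the accumulated width of all axis breaks that lie
# before it on the time axis.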
for tl, tr in self.parent.time_breaks:
if xpos < tl:
return np.array([xpos - offset_accumulation]), yvals
if tl < xpos < tr:
return np.array([tl - offset_accumulation]), yvals
offset_accumulation += tr - tl
return np.array([xpos - offset_accumulation]), yvals
# -*- coding: utf-8 -*-
"""
package: pylie
file : stats
Various statistical methods
"""
import numpy
def rss(response, observed):
"""
Residual Sum of Squares (RSS), also known as the Error Sum of Squares.
It is the sum of the squared differences between the experimental response y
and the response calculated by the regression model (the residuals).
"""
return sum(numpy.square(response - observed))
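# Example (hypothetical values):
#   rss(numpy.array([1.0, 2.0]), numpy.array([1.1, 1.9]))  # -> ~0.02 (up to rounding)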