'''
hans_on_feedforward_neural_network.py
A feedforward neural network trained with mini-batch stochastic gradient descent.
Why this module was written:
1. To understand neural networks deeply, and through them deep learning;
2. The source code of existing neural-network frameworks is hard to read, so implement one from scratch.
Features: implements logistic binary classification, softmax multi-class classification and
multivariate regression; see the example code for details.
Notes:
1. For efficiency the network does its matrix arithmetic with numpy and rarely uses for loops;
2. New features (different initialization schemes, regularization, ...) will be added over time;
   readers are welcome to extend this code;
3. The network is currently numerically fragile: avoid putting the "relu" family of activations
   in the first layer, where they easily cause numerical instability (they work well in later layers).
   Softmax and the cross-entropy loss are also implemented as separate functions, which can be
   unstable; they will be fused into a single function in the future;
4. The goal is to illustrate the principles rather than to be efficient, so readers are welcome
   to optimize and modify this code freely.
'''
# Third-party libraries
import numpy as np
# A feedforward neural network built on numpy
class Feedforward_neural_network(object):
def __init__(self):
        # --------------------- basic network parameters ------------------------
        # feature dimension of the samples
        self.sample_feature_dimension = None
        # dimension of every layer of the network
        self.layer_Dims = None
        # activation function used by every layer
        self.activation_function_names = None
        # loss function used by the network
        self.network_loss_faction_name = None
        # list that stores the parameters of each layer
        self.Layers = []
        # --------------------- dictionaries mapping function names to function objects ------------------------
        # activation functions
        self.activation_functions_dict = {'sigmoid': self.sigmoid, 'relu': self.relu,
                                          'leaky_relu': self.leaky_relu, 'tanh': self.tanh,
                                          'softmax': self.softmax, 'None': self.equal}
        # derivatives of the activation functions
        self.activation_gradient_functions_dict = {'sigmoid': self.sigmoid_gradient, 'tanh': self.tanh_gradient,
                                                   'relu': self.relu_gradient, 'leaky_relu': self.leaky_relu_gradient,
                                                   'softmax': None, 'None': self.equal_gradient}
        # loss functions
        self.Loss_faction_dict = {
            'multi_classification_cross_entropy_loss_function': self.multi_classification_cross_entropy_loss_function,
            'binary_classification_logistic_loss_function': self.binary_classification_logistic_loss_function,
            'multivariable_regression_loss_function': self.multivariable_regression_loss_function}
    # ---------------------- building the network structure ------------------------
    # create the network structure
    def creating_network_infrastructure(self, sample_feature_dimension, layer_Dims, activation_function_names):
        # treat the input layer as layer 0 and insert its dimension into the dimension list
        layer_Dims.insert(0, sample_feature_dimension)
        for layer_index in range(len(layer_Dims)):
            layer = {}
            # layer 0 is the input layer; it needs no activation function or weight matrix
            if layer_index == 0:
                self.Layers.append(layer)
                continue
            layer['activation_function_name'] = activation_function_names[layer_index - 1]
            # relu sets every non-positive value to 0, which makes the model hard to train at the start,
            # so layers that use relu are initialized with strictly positive weights
            if layer['activation_function_name'] == 'relu' or layer['activation_function_name'] == 'leaky_relu':
                layer['W'] = np.random.uniform(np.exp(-10), 1, size=(layer_Dims[layer_index], layer_Dims[layer_index - 1]))
            else:
                layer['W'] = np.random.randn(layer_Dims[layer_index], layer_Dims[layer_index - 1])
            layer['b'] = np.zeros((layer_Dims[layer_index], 1))
            self.Layers.append(layer)
    # ------------------------ forward propagation ----------------------------
    def forward_propagation(self, X_batch):
        # compute layer by layer, from the first layer to the output layer
        for layer_index, layer in enumerate(self.Layers):
            # the sample input is used directly as the "activated" value of layer 0
            if layer_index == 0:
                layer['A'] = X_batch
            else:
                layer['Z'] = np.dot(self.Layers[layer_index - 1]['A'], np.transpose(layer['W'])) + np.transpose(layer['b'])
                activation_function = self.activation_functions_dict[layer['activation_function_name']]
                layer['A'] = activation_function(layer['Z'])
        return self.Layers[-1]['A']
    # ------------------------ backpropagation ----------------------------
    def back_propagation(self, Delta_L):
        # error Delta of the last layer
        self.Layers[-1]['Delta'] = Delta_L
        # walk the network from the last layer back to the first, excluding layer 0 (the input layer)
        for layer_index in range(len(self.Layers) - 1, 0, -1):
            layer = self.Layers[layer_index]
            # if this is not the last layer
            if layer_index != len(self.Layers) - 1:
                activation_gradient_function = self.activation_gradient_functions_dict[layer['activation_function_name']]
                layer['Delta'] = np.dot(self.Layers[layer_index + 1]['Delta'],
                                        self.Layers[layer_index + 1]['W']) * activation_gradient_function(layer['Z'])
            _batch_size = Delta_L.shape[0]
            layer['C_b'] = (1.0 / _batch_size) * np.transpose((np.sum(layer['Delta'], axis=0, keepdims=True)))
            layer['C_W'] = (1.0 / _batch_size) * np.dot(np.transpose(layer['Delta']), self.Layers[layer_index - 1]['A'])
    # ---------------------- mini-batch gradient descent -------------------------
    def gradient_descent(self, learning_rate):
        # the input layer has no parameters, so start from layer 1
        for layer in self.Layers[1:]:
            layer['W'] = layer['W'] - learning_rate * layer['C_W']
            layer['b'] = layer['b'] - learning_rate * layer['C_b']
    # ---------------------- training and evaluating the model ------------------------
    def training_and_evaluation_model(self, X_train, Y_train, network_loss_faction_name,
                                      epochs, learning_rate, batch_size, evaluation_model_per_epochs):
        self.network_loss_faction_name = network_loss_faction_name
        for epoch in range(epochs):
            batch_index = 0
            for batch in range(X_train.shape[0] // batch_size):
                X_batch = X_train[batch_index: batch_index + batch_size]
                Y_batch = Y_train[batch_index: batch_index + batch_size]
                batch_index = batch_index + batch_size
                # forward propagation
                self.forward_propagation(X_batch)
                Last_layer_output = self.Layers[-1]['A']
                loss_f = self.Loss_faction_dict[network_loss_faction_name]
                # compute the loss
                Loss = loss_f(Y_batch, Last_layer_output)
                # gradient of the loss with respect to the output layer
                if network_loss_faction_name == 'multi_classification_cross_entropy_loss_function' \
                        or network_loss_faction_name == 'binary_classification_logistic_loss_function':
                    Delta_L = Last_layer_output - Y_batch
                elif network_loss_faction_name == 'multivariable_regression_loss_function':
                    Delta_L = 2 * (Last_layer_output - Y_batch)
                else:
                    Delta_L = None
                # backpropagation
                self.back_propagation(Delta_L)
                # gradient descent
                self.gradient_descent(learning_rate)
            if epoch % evaluation_model_per_epochs == 0:
                if network_loss_faction_name == 'multi_classification_cross_entropy_loss_function':
                    accuracy_rate = self.softmax_classified_accuracy_rate(X_train, Y_train)
                elif network_loss_faction_name == 'binary_classification_logistic_loss_function':
                    accuracy_rate = self.logistic_binary_classified_accuracy_rate(X_train, Y_train)
                elif network_loss_faction_name == 'multivariable_regression_loss_function':
                    accuracy_rate = None
                    matrix_cosine_similarity = self.multivariable_matrix_cosine_similarity(X_train, Y_train)
                if accuracy_rate is not None:
                    print('accuracy_rate:\t\t {:.5f}'.format(accuracy_rate))
                if network_loss_faction_name == 'multivariable_regression_loss_function':
                    print('matrix_cosine_similarity:\t\t {:.5f}'.format(matrix_cosine_similarity))
                print('Loss:\t\t{:.5f}'.format(Loss))
    # --------------------- activation functions and their derivatives ------------------------
    # identity activation
    def equal(self, F):
        return F
    # derivative of the identity activation
    def equal_gradient(self, F):
        return np.ones_like(F)
    # sigmoid activation
    def sigmoid(self, F):
        return 1.0 / (1 + np.exp(-F))
    # derivative of the sigmoid activation
    def sigmoid_gradient(self, F):
        return self.sigmoid(F) * (1 - self.sigmoid(F))
    # tanh activation
    def tanh(self, F):
        return np.tanh(F)
    # derivative of the tanh activation
    def tanh_gradient(self, F):
        return 1.0 - np.tanh(F)**2
    # relu activation
    def relu(self, F):
        return np.maximum(F, 0, F)
    # derivative of the relu activation
    def relu_gradient(self, F):
        return np.where(F > 0, 1, 0)
    # leaky relu activation
    def leaky_relu(self, F):
        # some papers recommend the small value 0.01; in practice 0.2 seems to work better
        leaky_value = 0.2
        return np.maximum(leaky_value * F, F, F)
    # derivative of the leaky relu activation
    def leaky_relu_gradient(self, F):
        leaky_value = 0.2
        return np.where(F > 0, 1, leaky_value)
    # softmax activation
    def softmax(self, F):
        exp = np.exp(F - np.max(F, axis=1, keepdims=True))
        return exp / np.sum(exp, axis=1, keepdims=True)
    # def unstable_softmax(self, F):
    #     return np.exp(F) / np.sum(np.exp(F), axis=1, keepdims=True)
    # --------------------- loss functions ------------------------
    # multivariate regression loss
    def multivariable_regression_loss_function(self, Y_batch, A_L):
        return np.mean(np.sum((A_L - Y_batch) ** 2, axis=1, keepdims=True))
    # multi-class cross-entropy loss
    def multi_classification_cross_entropy_loss_function(self, Y_batch, A_L):
        # log(x) is undefined for x = 0; a temporary workaround is to replace 0 with the tiny value np.exp(-30)
        # A_L = np.where(A_L == 0, np.exp(-30), A_L)
        return (-1) * np.mean(np.sum(Y_batch * np.log(A_L), axis=1, keepdims=True))
    # binary logistic loss
    def binary_classification_logistic_loss_function(self, Y_batch, A_L):
        # log(x) is undefined for x = 0; a temporary workaround is to replace 0 with the tiny value np.exp(-30)
        # A_L = np.where(A_L == 0, np.exp(-30), A_L)
        return (-1.0) * np.mean(
            np.dot(np.transpose(Y_batch), np.log(A_L)) + np.dot(1 - np.transpose(Y_batch), np.log(1 - A_L)))
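    # Hedged sketch (not in the original module): the module docstring notes that softmax
    # and the cross-entropy loss are implemented separately, which can be numerically
    # unstable. One common fix is to fuse them with the log-sum-exp trick, so
    # log(softmax(Z)) is computed without ever forming tiny probabilities.
    def fused_softmax_cross_entropy_loss(self, Y_batch, Z_L):
        # Z_L are the pre-activation logits of the output layer (layer['Z'], not layer['A'])
        Z_shift = Z_L - np.max(Z_L, axis=1, keepdims=True)
        log_softmax = Z_shift - np.log(np.sum(np.exp(Z_shift), axis=1, keepdims=True))
        return (-1.0) * np.mean(np.sum(Y_batch * log_softmax, axis=1, keepdims=True))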
    # --------------------- classification decision functions ------------------------
    # softmax decision function
    def softmax_classified_decision(self, F):
        # take the largest value in each row of the matrix
        max_index = np.max(F, axis=1, keepdims=True)
        # set the position of the largest value to 1 and everything else to 0
        one_hot_hat_Y = np.where(F == max_index, 1, 0)
        return one_hot_hat_Y
    # logistic binary decision function
    def logistic_binary_classified_decision(self, F):
        # values >= 0.5 become 1, everything else 0
        Y_hat = np.where(F >= 0.5, 1, 0)
        return Y_hat
    # ------------------- model evaluation functions -----------------------
    # cosine similarity between prediction and target matrices
    def multivariable_matrix_cosine_similarity(self, X, Y):
        # forward propagation
        Last_layer_output = self.forward_propagation(X)
        # prediction
        Y_hat = Last_layer_output
        # compare Y_hat and Y row by row and compute the cosine similarity of each pair of rows
        res_D = np.sum(Y_hat * Y, axis=1, keepdims=True) / (
            np.sqrt(np.sum(Y_hat ** 2, axis=1, keepdims=True)) * np.sqrt(np.sum(Y ** 2, axis=1, keepdims=True)))
        return np.mean(res_D)
    # accuracy of the softmax classifier
    def softmax_classified_accuracy_rate(self, X, Y):
        # forward propagation
        Last_layer_output = self.forward_propagation(X)
        # prediction
        Y_hat = self.softmax_classified_decision(Last_layer_output)
        # compare Y_hat and Y row by row; np.argmax() could equally be used to compare the positions of the maxima
        res_D = np.mean(np.where(Y_hat == Y, 1, 0), axis=1, keepdims=True)
        return np.mean(np.where(res_D == 1, 1, 0))
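# Hedged usage sketch (not part of the original file): a minimal softmax classification
# run on synthetic data, using only the methods defined above. The data, layer sizes and
# hyper-parameters below are illustrative assumptions.
def _demo_feedforward_network():
    np.random.seed(0)
    X = np.random.randn(200, 4)                   # 200 samples, 4 features
    labels = (X[:, 0] + X[:, 1] > 0).astype(int)  # two synthetic classes
    Y = np.eye(2)[labels]                         # one-hot targets
    net = Feedforward_neural_network()
    net.creating_network_infrastructure(4, [8, 2], ['tanh', 'softmax'])
    net.training_and_evaluation_model(X, Y,
                                      'multi_classification_cross_entropy_loss_function',
                                      epochs=50, learning_rate=0.1, batch_size=20,
                                      evaluation_model_per_epochs=10)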
from hokuyolx import HokuyoLX
import numpy as np
track_width = 2.0 # wheel center to center distance of car
forward_constant = 1.0 # multiplier for speed of car, adjust for proper braking distance
car_length = 6.0 # length of car from front to back wheels, center to center
graph = True
if graph:
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.patches import Arc
def in_path(points, speed, angle):
"""
Given an array of x, y points, the speed of the cart, and the front steering angle, returns whether or not the points are in the cart's predicted path.
:param points: np.ndarray of shape (n, 2) and dtype float64 - Array of x, y data points returned from the LIDAR scanner.
:param speed: float - Speed of the golf cart
:param angle: float - Steering angle of the golf cart, in degrees. 0 is straight, positive is left and negative is right
:return: Boolean - whether there are any data points in the cart's predicted path
"""
    # workaround for an angle of exactly 0 (use abs() so real right-turn angles are not overwritten)
    if abs(angle) < 1e-4:
        angle = 1e-4
r_center = car_length * np.tan(np.radians(90 - angle)) # left turn = positive angle
# transform points to match new origin at turn center
points[:, 0] += r_center
points[:, 1] += car_length
r_cf = np.hypot(r_center, car_length) # front center radius
r_left = np.hypot(r_center - track_width / 2, car_length) # left wheel turn radius
r_right = np.hypot(r_center + track_width / 2, car_length) # right wheel turn radius
y_max = car_length + forward_constant * speed
# check if y_max is past the turning circle
y_large = np.minimum(np.fabs(r_left), np.fabs(r_right)) < y_max
if y_large:
x_min = r_center - track_width / 2 if angle < 0 else 0
x_max = r_center + track_width / 2 if angle > 0 else 0
else:
x_min = r_center - track_width / 2 if angle < 0 else \
np.sqrt(
np.power(r_left, 2) - np.power(y_max, 2)
)
x_max = r_center + track_width / 2 if angle > 0 else \
-np.sqrt(
np.power(r_right, 2) - np.power(y_max, 2)
)
if graph:
fig, ax = plt.subplots()
# ax.plot(0, 0, label='Turn Center')
ax.add_patch(Rectangle((r_center - track_width / 2, 0),
track_width, car_length))
# check if y_max is past the turning circle
if y_large:
x1 = np.linspace(r_center - track_width / 2, 0, 100) # left boundary
x2 = np.linspace(r_center + track_width / 2, 0, 100) # right boundary
else:
if angle < 0:
x1 = np.linspace(r_center - track_width / 2,
-np.sqrt(
np.power(r_left, 2) - np.power(y_max, 2)
),
100) # left boundary
x2 = np.linspace(r_center + track_width / 2,
-np.sqrt(
np.power(r_right, 2) - np.power(y_max, 2)
),
100) # right boundary
else:
x1 = np.linspace(r_center - track_width / 2,
np.sqrt(
np.power(r_left, 2) - np.power(y_max, 2)
),
100) # left boundary
x2 = np.linspace(r_center + track_width / 2,
np.sqrt(
np.power(r_right, 2) - np.power(y_max, 2)
),
100) # right boundary
y1 = np.sqrt(np.power(r_left, 2) - np.power(x1, 2))
y2 = np.sqrt(np.power(r_right, 2) - np.power(x2, 2))
ax.plot(x1, y1)
ax.plot(x2, y2)
if not y_large:
if angle < 0:
ax.plot(
(-np.sqrt(np.power(r_left, 2) - np.power(y_max, 2)), -np.sqrt(np.power(r_right, 2) - np.power(y_max, 2))),
(y_max, y_max))
else:
ax.plot(
(np.sqrt(np.power(r_left, 2) - np.power(y_max, 2)), np.sqrt(np.power(r_right, 2) - np.power(y_max, 2))),
(y_max, y_max))
# ax.plot((0, r_center + track_width / 2), (0, 0))
# ax.plot((0, r_center - track_width / 2), (0, car_length))
# ax.plot((0, r_center + track_width / 2), (0, car_length))
# ax.plot((0, r_center), (0, car_length))
# ax.add_patch(Arc((0, 0), width=r_cf * 2, height=r_cf * 2, theta1=0.0, theta2=90.0))
ax.scatter(points[:, 0], points[:, 1])
plt.show()
# plt.savefig('plt.png')
# filter points to x range
points = points[(points[:, 0] <= x_max) & (points[:, 0] >= x_min)]
# check points
    if angle < 0:
        return np.any(points[
            (np.hypot(points[:, 0], points[:, 1]) > r_right)
            &
            (np.hypot(points[:, 0], points[:, 1]) < r_left)])
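    # Hedged completion (the original snippet is truncated here): the symmetric check
    # for left or straight steering, where r_left is the inner wheel-path radius.
    else:
        return np.any(points[
            (np.hypot(points[:, 0], points[:, 1]) > r_left)
            &
            (np.hypot(points[:, 0], points[:, 1]) < r_right)])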
import copy
from PIL import Image
from PIL import ImageEnhance
from torch.utils.data import DataLoader, Dataset
import torch
import numpy as np
import glob
import torchvision
import matplotlib.pyplot as plt
import random
import cv2
from torchvision import transforms
np.seterr(divide='ignore', invalid='ignore')
def takeSecond(elem):
return elem[0]**2+elem[1]**2
class heatmap_dataset(Dataset):
def __init__(self, ds_dir, sigma, setname='train', transform=None, norm_factor=256, rgb2gray=False, resize=True):
self.ds_dir = ds_dir
self.setname = setname
self.transform = transform
self.norm_factor = norm_factor
self.rgb2gray = rgb2gray
self.__sigma = sigma
self.resize = resize
self.c = 0
self.s = 0
self.r = 0
if setname == 'train':
data = []
gt = []
train_list = '/media/home_bak/ziqi/park/Hourglass_twopoint/dataset/train.txt'
f = open(train_list)
for line in f:
line_data = line.strip('\n')
line_gt = line_data.replace(
'perspective_img', 'point').replace('.jpg', '_OA.txt')
data.append(line_data)
gt.append(line_gt)
self.data = data
self.gt = gt
if setname == 'val':
data = []
gt = []
test_list = '/media/home_bak/ziqi/park/Hourglass_twopoint/dataset/val.txt'
f = open(test_list)
for line in f:
line_data = line.strip('\n')
line_gt = line_data.replace(
'perspective_img', 'point').replace('.jpg', '_OA.txt')
data.append(line_data)
gt.append(line_gt)
self.data = data
self.gt = gt
def __len__(self):
return len(self.data)
def get_affine_transform(self, center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
print(scale)
scale = np.array([scale, scale])
scale_tmp = scale * 200
# print('scale_tmp',scale_tmp)
# print("scale_tmp: {}".format(scale_tmp))
# print("output_size: {}".format(output_size)) # W H
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = self.get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
# print("src_dir: {}".format(src_dir))
# print("dst_dir: {}".format(dst_dir))
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
# print("center: {}".format(center))
src[0, :] = center + scale_tmp * shift
# print("src[0, :]: {}".format(src[0, :]))
# print("src_dir: {}".format(src_dir))
src[1, :] = center + src_dir + scale_tmp * shift
# print("src[1, :]: {}".format(src[1, :]))
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = self.get_3rd_point(src[0, :], src[1, :])
# print("src[2:, :]: {}".format(src[2:, :]))
dst[2:, :] = self.get_3rd_point(dst[0, :], dst[1, :])
# print('src', src,dst)
# print("src:\n{}".format(src))
# print("dst:\n{}".format(dst))
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
# exit(1)
return trans
def get_dir(self, src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(self, a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def _box2cs(self, size, aspect_ratio=None, scale_factor=None):
x, y, w, h = 0, 0, size[0], size[1]
return self._xywh2cs(x, y, w, h,
aspect_ratio,
scale_factor)
def _xywh2cs(self, x, y, w, h, aspect_ratio, scale_factor):
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > aspect_ratio * h:
h = w * 1.0 / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / 200, h * 1.0 / 200],
dtype=np.float32)
return center, scale
def __getitem__(self, item):
if item < 0 or item >= self.__len__():
return None
# Read images
# data = Image.open(str(self.data[item]))
data = cv2.imread(str(self.data[item]))
imgPath = str(self.data[item])
gt = [[0, 0], [0, 0]]
# gt = np.loadtxt(str(self.data[item]))
        with open(str(self.gt[item]), "r") as f:
            lines = f.readlines()  # read the whole annotation file into the list `lines`
            # row of the ground-truth matrix, starting from row 0
            row = 0
            # read the data in `lines` line by line
            for line in lines:
                # per-line processing: strip removes the trailing '\n', split breaks the line on spaces,
                # and the resulting fields are kept in `list`
                list = line.strip('\n').split(' ')
                gt[row][0] = float(list[0])
                gt[row][1] = float(list[1])
                # print("point:", list[0], list[1])
                # then continue with the next row
                row = row + 1
                if row == 2:
                    break
# gt.sort(key=takeSecond)
# print("file", imgPath)
H, W = 384, 384
# print(type(data))
        # data augmentation
# data = self.randomBlur(data)
data = self.RandomBrightness(data)
data = self.RandomHue(data)
data = self.RandomSaturation(data)
# data = self.randomColor(data)
data = self.randomGaussian(data, mean=0.2, sigma=0.3)
data = 255 * np.array(data).astype('uint8')
        data = cv2.cvtColor(np.array(data), cv2.COLOR_RGB2BGR)  # PIL image to OpenCV (BGR)
if self.rgb2gray:
t = torchvision.transforms.Grayscale(1)
data = t(data)
# Convert to numpy
data = np.array(data, dtype=np.float32) / self.norm_factor
# gt = np.array(gt, dtype=np.float32) / 384
gt = np.array(gt, dtype=np.float32)
size = [384, 384]
        mask = np.zeros((size[0], size[1]), dtype=float)  # plain float: np.float is removed in recent numpy
heatmaps = self._putGaussianMaps(gt, H, W, 1, self.__sigma)
heatmaps = heatmaps.astype(np.float32)
# print(heatmaps)
c, s = self._box2cs(size, aspect_ratio=1)
r = 0
# print(r)
trans = self.get_affine_transform(c, s, r, size)
# data = cv2.warpAffine(
# data, trans, (size[0], size[1]), flags=cv2.INTER_LINEAR)
mask = cv2.warpAffine(
mask, trans, (size[0], size[1]), flags=cv2.INTER_LINEAR, borderValue=255)
# Expand dims into Pytorch format
        data = np.transpose(data, (2, 0, 1))
import numpy as np
import matplotlib.pyplot as pyplot
import h5py
import scipy
from PIL import Image
from scipy import ndimage
# extras for debugging
import math
# dataset loader
import utils
# dataset parameters
training_path = './training_data' # path of training data containing class sub-directories (image files)
image_size = 128 # length and width to uniformly format training data
classes = ['apple', 'orange', 'banana'] # classes of images to classify
c_len = len(classes) # number of classes to be used for training
validation_size = 0.2 # randomly chosen 20% of training data to be used as validation data
# model parameters
iteration_count = 1000 # number of times to apply gradient descent
learning_rate = 0.005 # size of gradient step
show_cost = True # show cost every 100 iterations
# loading data_set object
data_set = utils.read_data_sets(training_path, image_size, classes, validation_size)
# designating training objects
original_training_images = data_set.train.images # image np.array w/ shape: (image_size, image_size, channel_depth)
original_training_labels = data_set.train.labels # class label array (exempli gratia '[1.0, 0, 0]' from apple)
training_class_set = data_set.train.class_set # class label string array (e.g. 'apple')
training_file_name = data_set.train.image_names # original unique image file names
# designating validation objects
original_validation_images = data_set.valid.images
original_validation_labels = data_set.valid.labels
validation_class_set = data_set.valid.class_set
validation_file_name = data_set.valid.image_names
"""
Reshaping data arrays using matrix transposition
flattening color pixels to single array using transpose function of image pixel matrix
*_images shape: (image_size * image_size * channel_depth, data_set_size)
*_labels shape: (data_set_size, channel_depth)
"""
training_images = original_training_images.reshape(original_training_images.shape[0], -1).T
validation_images = original_validation_images.reshape(original_validation_images.shape[0], -1).T
training_labels = original_training_labels.T
validation_labels = original_validation_labels.T
# data is now properly formatted and defined respectively
def sigmoid(z):
"""
Computing the sigmoid of z
Parameters:
-- z = w^T * x^i + b
-- w^T: specific weight associated with neuron index from previous layer
-- x^i: specific neuron value from previous layer
-- b: bias associated with neuron
Return:
s: result of applying sigmoid activation function (domain in R, returns monotonically increasing value between 0 and 1)
s = 1 / (1 + e^-z)
"""
s = 1 / (1 + np.exp(-z)) #definition of the sigmoid function
return s
def init_zero(dimension):
"""
Parameters:
-- dimension: the length of matrix to be initialized
Initializes:
    -- w (weight array): zero array w/ shape: (image_size * image_size * channel_depth, 3)
-- b (bias value): as zero
"""
w = np.zeros(shape=(dimension, 3))
b = 0
# shape and type check
assert(w.shape == (dimension, 3)), "w in not in proper form: init_zero(dimension)"
assert(isinstance(b, float) or isinstance(b, int)), "b is not of type int/float"
return w, b
def cross_entropy_cost(m, A, L):
"""
Cross-Entropy Cost function of logistic-regression.
Parameters:
-- m: count of items in set
-- A: numpy array of activation values corresponding to each neuron w/ shape: (1, m)
-- L: true label array to identify true class type w/ shape: (1, m)
Return:
-- cost: negative log-probability cost for logistic regression
Notes:
-- Cross-Entropy Cost is calculated in a logarithmic fashion as prediction function (sigmoid) is non-linear.
-- 'Squaring this prediction as we do in MSE results in a non-convex function with many local minimums.
If our cost function has many local minimums, gradient descent may not find the optimal global minimum.'
-- Cross-Entropy Cost penalizes confident wrong predictions more than rewards confident correct predictions.
Calculation of Cross-Entropy Cost:
C = (-1 / m) * Sigma([L[i] * log(A[i]) + (1 - L[i]) * (log(1 - A[i]))
from i = 1 to m
"""
cost = (-1 / m) * np.sum(L * np.log(A) + (1 - L) * (np.ma.log(1 - A))) #Note: Using numpy masked array np.ma for values of log(0)
# Sanity checks
cost = np.squeeze(cost) #squeeze() removes single dimensional elements from the array: e.g. (1, 3, 1) -> (3,)
assert(cost.shape == ()) #checks if cost value is a scalar
return cost
def propagate(w, b, image_matrix, true_labels):
"""
Forwards and Backwards Propagation of Error.
Parameters:
    -- w: weights numpy array w/ shape: (image_size * image_size * channel_depth, 3)
-- b: specific bias, scalar value
-- image_matrix: flattened image matrix w/ shape (image_size * image_size * channel_depth, image_matrix.shape[1])
-- true_labels: correct "label" array for each image w/ shape (1, image_matrix.shape[1])
Returns:
-- gradients: the weight and bias gradients computed from the activation layer
-- cost: the cross entropy cost of the logistic regression
"""
m = image_matrix.shape[1] # image count
"""
FORWARD PROPAGATION: output compared to actual to obtain cost (error)
-- activation_layer: sigmoid of the linear function
sigmoid(z) w/ z = w^T * x^i + b
-- cost: see cross_entropy_cost(m, A, L)
"""
activation_layer = sigmoid(np.dot(w.T, image_matrix) + b)
cost = cross_entropy_cost(m, activation_layer, true_labels)
"""
BACKWARD PROPAGATION: to obtain gradient of loss for weights and bias as to minimize error of network
-- dw: gradient of loss with respect to w
-- db: gradient of loss with respect to b
"""
dw = (1 / m) * np.dot(image_matrix, (activation_layer - true_labels).T)
db = (1 / m) * np.sum(activation_layer - true_labels)
# sanity check
assert(dw.shape == w.shape) #checks if weight gradient retains weight matrix shape
assert(db.dtype == float) #checks if bias gradient is a scalar
# format into single object for return
gradients = {
"dw": dw,
"db": db
}
return gradients, cost
def gradient_descent(w, b, image_matrix, true_labels, iteration_count, learning_rate, show_cost):
"""
Gradient Descent optimization of weights and bias scaled by learning rate parameter
Parameters:
    -- w: weights array w/ shape: (image_size * image_size * channel_depth, 3)
-- b: bias scalar
-- image_matrix: flattened image matrix w/ shape (image_size * image_size * channel_depth, m)
-- true_labels: correct "label" array for each image w/ shape (1, m)
    -- iteration_count: the number of iterations that the function will loop through during optimization
    -- learning_rate: the size of each gradient step
-- show_cost: print cost value to console every 100 iterations
Return:
-- parameters: post-step weight array and bias value
-- gradients: weight and bias gradients computed through back propagation
-- costs: cost array holding incremental cost values
Notes:
-- Other methods may be used to optimize the weights and bias
"""
costs = []
for i in range(iteration_count):
gradients, cost = propagate(w, b, image_matrix, true_labels)
# if math.isnan(cost):
# A = sigmoid(np.dot(w.T, image_matrix) + b)
# print(np.squeeze(A))
# print(cross_entropy_cost(image_matrix.shape[1], A, true_labels))
dw = gradients['dw'] # obtaining weight gradient from back propagation
db = gradients['db'] # obtaining bias gradient from back propagation
w = w - learning_rate * dw # w array stepping towards local minimum with steps of length: learning_rate
b = b - learning_rate * db # b value stepping
        # appends cost value at given iteration increments to costs array for analytics
collection_rate = 1
if i % collection_rate == 0:
costs.append(cost)
# Shows cost value every 100 iterations if True
if show_cost and i % 100 == 0 and i != 0:
print('Iteration: %i, Cost: %f' % (i, cost))
parameters = {
"w": w,
"b": b
}
gradients = {
"dw": dw,
"db": db,
}
return parameters, gradients, costs
def predict(w, b, image_matrix):
"""
Makes a prediction about label using parameters obtained from learning
Parameters:
-- w: weights array w/ shape: (image_size * image_size * channel_depth, 3)
-- b: bias scalar
-- image_matrix: flattened image matrix w/ shape (image_size * image_size * channel_depth, m)
Returns:
-- prediction_labels: numpy array containing prediction labels computed from the activation layer
Notes:
"""
m = image_matrix.shape[1] # grab set size again
    prediction_labels = np.zeros((3, m))
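    # Hedged completion (the original snippet is cut off here): one plausible way to
    # finish predict() with the shapes used above - compute the (3, m) activation layer
    # and one-hot encode the most probable class in each column.
    activation_layer = sigmoid(np.dot(w.T, image_matrix) + b)
    prediction_labels[np.argmax(activation_layer, axis=0), np.arange(m)] = 1
    return prediction_labels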
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_stats_utils.ipynb (unless otherwise specified).
__all__ = ['cPreProcessing', 'cStationary', 'cErrorMetrics']
# Cell
import numpy as np
import pandas as pd
from scipy.stats import boxcox, pearsonr
from scipy.special import inv_boxcox
from pandas.tseries.frequencies import to_offset
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tools.eval_measures import aic, bic
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
# Cell
class cPreProcessing():
"""
Parent class.
Methods for dealing with irregularly spaced or missing data.
"""
def __init__(self):
pass
def fget_regular_times(self, df, timestep):
"""
Generate dataframe of regularly spaced times (to impute to)
(From fbprophet/forecaster/make_future_dataframe)
Parameters
----------
df = [pandas dataframe]
timestep = [datetime timedelta object]
Returns
-------
regtimes = [pandas DatetimeIndex] of datetimes regularly spaced at timestep
"""
# normalize start date to midnight
start_date = df.ds.min().normalize()
# round up end date by one extra timestep
end_date = (df.ds.max() + timestep).normalize()
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html
regtimes = pd.date_range(start=start_date, end=end_date, freq=to_offset(timestep))
return regtimes
def finterleaf(self, df, impute_times):
"""
        Interleave the dataframe with the new prediction times.
        Values at the prediction dates are set to NaN so the imputer can fill them.
Parameters
----------
df = [pandas dataframe]
impute_times = [pandas DatetimeIndex] (format of regtimes)
Returns
-------
dft = pandas dataframe (format for use in fimpute)
"""
# if impute_times taken from fmake_regular_times()
if type(impute_times) == pd.core.indexes.datetimes.DatetimeIndex:
impute_times = pd.DataFrame(impute_times)
impute_times.columns = ["ds"]
# set date index
df.set_index('ds', inplace=True)
impute_times.set_index('ds', inplace=True)
        # combine (interleave)
dft = pd.concat([df, impute_times], sort=True)
dft.sort_values(by=["ds"], inplace=True)
# remove duplicate entries
dft = dft[dft.index.duplicated() == False]
return dft
def fimpute(self, df, method="time"):
"""
Imputation of data to new timestamps with NaN value.
Parameters
----------
df = dataframe containing original data and NaNs at timestamps for imputation
timestamps are the df index
Returns
-------
dfi = imputed dataframe
"""
        # interpolates halfway; doesn't account for weighting towards the closer time
if method == "interp":
dfi = df.interpolate()
# missing values given mean value over whole time series
if method == "mean":
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(df)
dfi = imp.transform(df)
# linear interpolation weighted by timestamp proximity
if method == "time":
dfi = df.interpolate(method='time')
# smoothing
if method == "akima":
dfi = df.interpolate(method='akima')
return dfi
def fmake_regular_freq(self, df, timestep, method="time"):
"""
Interpolate data so regular update frequency throughout dataset.
(Deal with missing values)
Some python functions (e.g. seasonal_decompose, AutoArima) require a data "freq" argument
to determine seasonality. (Can be inferred from df.index.freq, df.index.inferred_freq)
Such functions require a constant data frequency.
Parameters
----------
df = irregularly space dataframe (with datestamp name "ds")
timestep = desired update frequency of data (timedelta object)
method = imputation method
Returns
-------
dff = imputed regularly spaced [pandas dataframe]
"""
        # 0. preprocess: if the dataframe already has time as its index, reset it and add it back as a column
if df.index.name == "ds":
# df_lim["ds"] = df_lim.index
df.reset_index(level=0, inplace=True)
# 1. include in dataset times where you want to impute (and set to NaN values)
impute_times = self.fget_regular_times(df, timestep)
dft = self.finterleaf(df, impute_times)
# 2. impute with chosen method
dfi = self.fimpute(dft, method=method)
# 3. remove original data not at correct timestep
dff = dfi[dfi.index.isin(impute_times) == True]
if dff.index.freq == None:
dff.index.freq = to_offset(timestep)
return dff
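    # Hedged usage sketch (not part of the original notebook export): regularize an
    # irregular daily series onto a one-day grid. The toy values and the 'ds'/'y'
    # column names are assumptions that follow the conventions used above.
    def _demo_fmake_regular_freq(self):
        import datetime
        demo = pd.DataFrame({
            "ds": pd.to_datetime(["2020-01-01", "2020-01-02 06:00", "2020-01-04 18:00"]),
            "y": [1.0, 2.0, 4.0],
        })
        # impute onto a regular daily grid with time-weighted linear interpolation
        return self.fmake_regular_freq(demo, datetime.timedelta(days=1), method="time")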
# Cell
class cStationary(cPreProcessing):
"""
methods for checking whether time series is stationary
methods for transforming the time series into a stationary time series
methods for obtaining (p,q,d) ARIMA parameters
https://towardsdatascience.com/detecting-stationarity-in-time-series-data-d29e0a21e638
"""
def __init__(self):
pass
def fadf_verbose(self, adftest):
"""
CHECK STATIONARITY.
Print explanation of output of Augmented Dickey-Fuller test.
The Augmented Dickey-Fuller test is a type of statistical test called a unit root test.
The intuition behind a unit root test is that it determines how strongly a time series is defined by a trend.
Parameters
----------
adftest = adfuller(data.y, regression="ct")
Returns
-------
"""
print("""
Augmented Dickey-Fuller:
Null hypothesis: the time series can be represented by a unit root, thus not stationary (has some time-dependent structure)
""")
output = pd.Series(adftest[0:4], index=['Test Statistic','pvalue','#Lags Used','Number of Observations Used'])
for key,value in adftest[4].items():
output['Critical Value ({})'.format(key)] = value
print(output)
if output.pvalue <= 0.05:
print("\nReject the null hypothesis (H0), the data does not have a unit root and IS STATIONARY.")
return True
else:
print("\nFail to reject the null hypothesis (H0), the data has a unit root and is NON-STATIONARY.")
return False
def fkpss_verbose(self, kpsstest):
"""
CHECK STATIONARITY.
Print explanation of output of Kwiatkowski-Phillips-Schmidt-Shin test.
Another test for checking the stationarity of a time series (reversed null hypothesis to ADF).
In KPSS test, to turn ON the stationarity testing around a trend, you need to explicitly pass the regression='ct'.
A major difference between KPSS and ADF tests:
the capability of the KPSS test to check for stationarity in the ‘presence of a deterministic trend’.
Parameters
----------
kpsstest = kpss(data.y, regression="ct")
Returns
-------
"""
print("""
Kwiatkowski-Phillips-Schmidt-Shin:
Null hypothesis: the process is trend stationary
""")
output = pd.Series(kpsstest[0:3], index=['Test Statistic','pvalue','Lags Used'])
for key,value in kpsstest[3].items():
output['Critical Value ({})'.format(key)] = value
print (output)
if output.pvalue <= 0.05:
print("\nReject the null hypothesis (H0), the data has a unit root and is NON-STATIONARY.")
return False
else:
print("\nFail to reject the null hypothesis (H0),the data does not have a unit root and IS STATIONARY. ")
return True
def fstationary_verbose(self, stat_adf, stat_kpss):
"""
CHECK STATIONARITY.
Compare results of adf and kpss tests and advise how to make stationary.
Returns
-------
"""
if (stat_adf is False) and (stat_kpss is False):
print("\nBoth tests conclude that the series is not stationary -> series is not stationary")
return False
elif (stat_adf is True) and (stat_kpss is True):
print("\nBoth tests conclude that the series is stationary -> series is stationary")
return True
elif (stat_adf is False) and (stat_kpss is True):
print("\nKPSS = stationary and ADF = not stationary -> trend stationary, use power tranform to make stationary")
return False
elif (stat_adf is True) and (stat_kpss is False):
print("\nKPSS = not stationary and ADF = stationary -> difference stationary, use differencing transform to make stationary")
return False
def fcheck_stationary(self, y, verbose=True):
"""
CHECK STATIONARITY.
Parameters
----------
y = time series variable, data.y
Returns
-------
stationary status [bool]
"""
#df.dropna()
adftest = adfuller(y, regression="ct")
kpsstest = kpss(y, regression="ct")
if verbose:
stat_adf = self.fadf_verbose(adftest)
stat_kpss = self.fkpss_verbose(kpsstest)
stat = self.fstationary_verbose(stat_adf, stat_kpss)
return stat
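    # Hedged usage sketch (not part of the original notebook export): run the combined
    # ADF/KPSS check on a synthetic white-noise series, which both tests should report
    # as stationary.
    def _demo_check_stationary(self):
        rng = np.random.default_rng(0)
        y = pd.Series(rng.normal(size=200))
        return self.fcheck_stationary(y, verbose=True)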
def fdecompose(self, df, model="additive"):
"""
CHECK STATIONARITY.
Seasonal decomposition using moving averages
https://www.statsmodels.org/stable/generated/statsmodels.tsa.seasonal.seasonal_decompose.html
Time series must be regularly spaced (have constant frequency, dff.index.freq or dff.index.inferred_freq)
Parameters
----------
df = data frame with date index (to infer frequency)
"""
s = seasonal_decompose(df, model=model)
trend = s.trend
plt.plot(trend)
plt.title("Trend")
plt.show()
seasonal = s.seasonal
plt.plot(seasonal)
plt.title("Seasonal component")
plt.show()
resid = s.resid
plt.plot(resid)
plt.title("Residuals")
plt.show()
def fcheck_density(self, y):
"""
CHECK STATIONARITY.
Plot histogram and density trend (check gaussianity)
"""
plt.figure(1)
plt.subplot(211)
plt.hist(y)
plt.title("Data Distribution")
plt.subplot(212)
y.plot(kind='kde')
plt.show()
def fcheck_lag(self, y):
"""
CHECK STATIONARITY.
Plot lag scatter, autocorrelation and partial autocorrelation functions
For differencing and establishing (p,q,d) values for ARIMA
"""
plt.figure()
pd.plotting.lag_plot(y)
plt.title("Lag-1 plot")
plt.plot()
plt.figure()
pd.plotting.autocorrelation_plot(y)
plt.title("Autocorrelation")
plt.plot()
# contains confidence interval:
# correlation values outside of this code are very likely a correlation and not a statistical fluke
plot_acf(y)
plot_pacf(y)
def fdifferencing(self, df, interval=1):
"""
MAKE STATIONARY. (difference stationary)
adf and kpss can give the d value required by ARIMA
Make series stationary: In order to satisfy the assumption, it is necessary to make the series stationary.
This would include checking the stationarity of the series and performing required transformations
Determine d value: For making the series stationary, the number of times the difference operation was
performed will be taken as the d value.
        The auto_arima function works by conducting differencing tests
(i.e., Kwiatkowski–Phillips–Schmidt–Shin, Augmented Dickey-Fuller or Phillips–Perron)
to determine the order of differencing, d. Canova-Hansen test for seasonal stability.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.diff.html
"""
# df.dropna()
return df.diff(periods=interval)
def fboxcox(self, df):
"""
MAKE STATIONARY. (trend stationary)
[https://mode.com/example-gallery/forecasting_prophet_python_cookbook/]
Often in forecasting, you’ll explicitly choose a specific type of power transform to apply to the data
to remove noise before feeding the data into a forecasting model (e.g. a log transform or square root
transform, amongst others). However, it can sometimes be difficult to determine which type of power
transform is appropriate for your data. This is where the Box-Cox Transform comes in. Box-Cox Transforms
are data transformations that evaluate a set of lambda coefficients (λ) and selects the value that
achieves the best approximation of normality.
Prophet natively models the increase in mean of the data over time,
but we should take additional steps to normalize as much variance as possible
to achieve the most accurate forecasting results.
We can do this by applying a power transform to our data.
[https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.boxcox.html]
inverse transform (after fitting and forecasting):
forecast[['yhat','yhat_upper','yhat_lower']] = forecast[['yhat','yhat_upper','yhat_lower']].apply(lambda x: inv_boxcox(x, lam))
Parameters
----------
df = dataframe with y component to be transformed
constraints: data must be positive (and non-zero)
Returns
-------
df = original dataframe with additional tranformed y column
lam = lambda value of power transform determined by boxcox, needed for inversing the transformation
constraints: lambda must not be negative
"""
df['y_trans'], lam = boxcox(df['y'])
return df, lam
# Cell
class cErrorMetrics():
"""
methods for evaluating error
"""
def __init__(self):
pass
def fmse_manual(self, y_true, y_pred):
"""
Mean square error.
Manual implementation.
"""
        return np.sum((np.array(y_true) - np.array(y_pred)) ** 2) / len(y_true)
"""
Methods to characterize image textures.
"""
import numpy as np
from .._shared.utils import assert_nD
from ._texture import _glcm_loop, _local_binary_pattern
def greycomatrix(image, distances, angles, levels=256, symmetric=False,
normed=False):
"""Calculate the grey-level co-occurrence matrix.
A grey level co-occurrence matrix is a histogram of co-occurring
greyscale values at a given offset over an image.
Parameters
----------
image : array_like of uint8
Integer typed input image. The image will be cast to uint8, so
the maximum value must be less than 256.
distances : array_like
List of pixel pair distance offsets.
angles : array_like
List of pixel pair angles in radians.
levels : int, optional
The input image should contain integers in [0, levels-1],
where levels indicate the number of grey-levels counted
(typically 256 for an 8-bit image). The maximum value is
256.
symmetric : bool, optional
If True, the output matrix `P[:, :, d, theta]` is symmetric. This
is accomplished by ignoring the order of value pairs, so both
(i, j) and (j, i) are accumulated when (i, j) is encountered
for a given offset. The default is False.
normed : bool, optional
If True, normalize each matrix `P[:, :, d, theta]` by dividing
by the total number of accumulated co-occurrences for the given
offset. The elements of the resulting matrix sum to 1. The
default is False.
Returns
-------
P : 4-D ndarray
The grey-level co-occurrence histogram. The value
`P[i,j,d,theta]` is the number of times that grey-level `j`
occurs at a distance `d` and at an angle `theta` from
grey-level `i`. If `normed` is `False`, the output is of
type uint32, otherwise it is float64.
References
----------
.. [1] The GLCM Tutorial Home Page,
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
.. [2] Pattern Recognition Engineering, <NAME> & <NAME>.
Smith
.. [3] Wikipedia, http://en.wikipedia.org/wiki/Co-occurrence_matrix
Examples
--------
Compute 2 GLCMs: One for a 1-pixel offset to the right, and one
for a 1-pixel offset upwards.
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> result = greycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4], levels=4)
>>> result[:, :, 0, 0]
array([[2, 2, 1, 0],
[0, 2, 0, 0],
[0, 0, 3, 1],
[0, 0, 0, 1]], dtype=uint32)
>>> result[:, :, 0, 1]
array([[1, 1, 3, 0],
[0, 1, 1, 0],
[0, 0, 0, 2],
[0, 0, 0, 0]], dtype=uint32)
>>> result[:, :, 0, 2]
array([[3, 0, 2, 0],
[0, 2, 2, 0],
[0, 0, 1, 2],
[0, 0, 0, 0]], dtype=uint32)
>>> result[:, :, 0, 3]
array([[2, 0, 0, 0],
[1, 1, 2, 0],
[0, 0, 2, 1],
[0, 0, 0, 0]], dtype=uint32)
"""
assert_nD(image, 2)
assert_nD(distances, 1, 'distances')
assert_nD(angles, 1, 'angles')
assert levels <= 256
image = np.ascontiguousarray(image)
assert image.min() >= 0
assert image.max() < levels
image = image.astype(np.uint8)
distances = np.ascontiguousarray(distances, dtype=np.float64)
angles = np.ascontiguousarray(angles, dtype=np.float64)
P = np.zeros((levels, levels, len(distances), len(angles)),
dtype=np.uint32, order='C')
# count co-occurences
_glcm_loop(image, distances, angles, levels, P)
# make each GLMC symmetric
if symmetric:
        Pt = np.transpose(P, (1, 0, 2, 3))
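        # Hedged completion (the snippet is cut off here): fold in the transposed counts,
        # then normalize each GLCM if requested, as described in the docstring for the
        # `symmetric` and `normed` options.
        P = P + Pt
    if normed:
        P = P.astype(np.float64)
        glcm_sums = np.sum(P, axis=(0, 1), keepdims=True)
        glcm_sums[glcm_sums == 0] = 1
        P /= glcm_sums
    return P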
# Problem Set 4. Quant Macro - <NAME>
# In collaboration with <NAME>
# Import packages
import numpy as np
from numpy import vectorize
import timeit
import matplotlib.pyplot as plt
#%% Steady state setting h = 1 -> eliminate the part of the utility with h
# Parametrization of the model:
theeta = 0.679 # labor share
beta = 0.988 # discount factor
delta = 0.013 # depreciation rate
# For computing the steady state we normalize output to one:
y = 1
h = 1
kss = 42.55
iss = delta
css = 1 - delta
#%% CHEBYSHEV
start = timeit.default_timer()
# Discretize the variable of interest:
ki = np.array(np.linspace(0.01, 50, 120))
ki = np.tile(ki, 120)
ki = np.split(ki, 120)
ki = np.transpose(ki)
kj = np.array(np.linspace(0.01, 50, 120))
"""
Copyright (c) 2010-2018 CNRS / Centre de Recherche Astrophysique de Lyon
Copyright (c) 2012-2017 <NAME> <<EMAIL>>
Copyright (c) 2014-2019 <NAME> <<EMAIL>>
Copyright (c) 2016 <NAME> <<EMAIL>>
Copyright (c) 2016-2019 <NAME> <<EMAIL>>
Copyright (c) 2018-2019 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import types
import astropy.units as u
from astropy.io import fits
from astropy.stats import gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma
from astropy.convolution import convolve, Box1DKernel
from os.path import join, abspath, dirname
from scipy import interpolate, signal
from scipy.optimize import leastsq
from . import ABmag_filters, wavelet1D
from .arithmetic import ArithmeticMixin
from .data import DataArray
from .fitting import Gauss1D
from .objs import flux2mag
__all__ = ('Spectrum', 'vactoair', 'airtovac')
def vactoair(vacwl):
"""Calculate the approximate wavelength in air for vacuum wavelengths.
Parameters
----------
vacwl : ndarray
Vacuum wavelengths.
This uses an approximate formula from the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/vactoair.pro
"""
wave2 = vacwl * vacwl
n = 1.0 + 2.735182e-4 + 131.4182 / wave2 + 2.76249e8 / (wave2 * wave2)
# Do not extrapolate to very short wavelengths.
if not isinstance(vacwl, np.ndarray):
if vacwl < 2000:
n = 1.0
else:
ignore = np.where(vacwl < 2000)
n[ignore] = 1.0
return vacwl / n
def airtovac(airwl):
"""Convert air wavelengths to vacuum wavelengths.
Parameters
----------
    airwl : ndarray
        Air wavelengths.
This uses the IAU standard as implemented in the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/airtovac.pro
"""
sigma2 = (1e4 / airwl)**2. # Convert to wavenumber squared
n = 1.0 + (6.4328e-5 + 2.94981e-2 / (146. - sigma2) +
2.5540e-4 / (41. - sigma2))
if not isinstance(airwl, np.ndarray):
if airwl < 2000:
n = 1.0
else:
ignore = np.where(airwl < 2000)
n[ignore] = 1.0
return airwl * n
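# Hedged usage sketch (not part of the original module): the two conversions above are
# approximate inverses of each other, so a round trip should reproduce the input
# wavelengths to well under 0.01 Angstrom at optical wavelengths.
def _demo_air_vacuum_roundtrip():
    wl = np.array([4861.0, 6563.0])  # H-beta and H-alpha air wavelengths, in Angstroms
    return np.allclose(vactoair(airtovac(wl)), wl, atol=1e-2)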
class Spectrum(ArithmeticMixin, DataArray):
"""Spectrum objects contain 1D arrays of numbers, optionally
accompanied by corresponding variances. These numbers represent
sample fluxes along a regularly spaced grid of wavelengths.
The spectral pixel values and their variances, if any, are
available as arrays[q that can be accessed via properties of the
Spectrum object called .data and .var, respectively. These arrays
are usually masked arrays, which share a boolean masking array
that can be accessed via a property called .mask. In principle,
these arrays can also be normal numpy arrays without masks, in
which case the .mask property holds the value,
numpy.ma.nomask. However non-masked arrays are only supported by a
subset of mpdaf functions at this time, so masked arrays should be
used where possible.
When a new Spectrum object is created, the data, variance and mask
arrays can either be specified as arguments, or the name of a FITS
file can be provided to load them from.
Parameters
----------
filename : string
An optional FITS file name from which to load the spectrum.
None by default. This argument is ignored if the data
argument is not None.
ext : int or (int,int) or string or (string,string)
The optional number/name of the data extension
or the numbers/names of the data and variance extensions.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
unit : str or `astropy.units.Unit`
The physical units of the data values. Defaults to
`astropy.units.dimensionless_unscaled`.
data : float array
An optional 1 dimensional array containing the values of each
pixel of the spectrum, stored in ascending order of wavelength
(None by default). Where given, this array should be 1
dimensional.
var : float array
An optional 1 dimensional array containing the estimated
variances of each pixel of the spectrum, stored in ascending
order of wavelength (None by default).
Attributes
----------
filename : string
The name of the originating FITS file, if any. Otherwise None.
unit : `astropy.units.Unit`
The physical units of the data values.
primary_header : `astropy.io.fits.Header`
The FITS primary header instance, if a FITS file was provided.
data_header : `astropy.io.fits.Header`
The FITS header of the DATA extension.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
"""
# Tell the DataArray base-class that Spectrum objects require 1 dimensional
# data arrays and wavelength coordinates.
_ndim_required = 1
_has_wave = True
def subspec(self, lmin, lmax=None, unit=u.angstrom):
"""Return the flux at a given wavelength, or the sub-spectrum
of a specified wavelength range.
A single flux value is returned if the lmax argument is None
(the default), or if the wavelengths assigned to the lmin and
lmax arguments are both within the same pixel. The value that
is returned is the value of the pixel whose wavelength is
closest to the wavelength specified by the lmin argument.
        Note that if a wavelength range is asked for, a view on the original
spectrum is returned and both will be modified at the same time. If
you need to modify only the sub-spectrum, you'll need to copy() it
before.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : float or `~mpdaf.obj.Spectrum`
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
if lmax is None:
lmax = lmin
# Are lmin and lmax array indexes?
if unit is None:
pix_min = max(0, int(lmin + 0.5))
pix_max = min(self.shape[0], int(lmax + 0.5))
# Convert wavelengths to the nearest spectrum array indexes.
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True, unit=unit))
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True, unit=unit) + 1)
# If the start and end of the wavelength range select the same pixel,
# return just the value of that pixel.
if (pix_min + 1) == pix_max:
return self[pix_min]
# Otherwise return a sub-spectrum.
else:
return self[pix_min:pix_max]
def get_step(self, unit=None):
"""Return the wavelength step size.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned step-size.
Returns
-------
out : float
The width of a spectrum pixel.
"""
if self.wave is not None:
return self.wave.get_step(unit)
def get_start(self, unit=None):
"""Return the wavelength value of the first pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the first pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_start(unit)
def get_end(self, unit=None):
"""Return the wavelength of the last pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the final pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_end(unit)
def get_range(self, unit=None):
"""Return the wavelength range (Lambda_min, Lambda_max) of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelengths.
Returns
-------
out : float array
The minimum and maximum wavelengths.
"""
if self.wave is not None:
return self.wave.get_range(unit)
def mask_region(self, lmin=None, lmax=None, inside=True, unit=u.angstrom):
"""Mask spectrum pixels inside or outside a wavelength range, [lmin,lmax].
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
inside : bool
If True, pixels inside the range [lmin,lmax] are masked.
If False, pixels outside the range [lmin,lmax] are masked.
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
else:
if lmin is None:
pix_min = 0
else:
if unit is None:
pix_min = max(0, int(lmin + 0.5))
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True,
unit=unit))
if lmax is None:
pix_max = self.shape[0]
else:
if unit is None:
pix_max = min(self.shape[0], int(lmax + 0.5))
else:
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True,
unit=unit) + 1)
if inside:
self.data[pix_min:pix_max] = np.ma.masked
else:
self.data[:pix_min] = np.ma.masked
self.data[pix_max + 1:] = np.ma.masked
def _wavelengths_to_slice(self, lmin, lmax, unit):
"""Return the slice that selects a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : slice
The slice needed to select pixels within the specified wavelength
range.
"""
if unit is not None and self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
# Get the pixel index that corresponds to the minimum wavelength.
if lmin is None:
i1 = 0
else:
if unit is None:
if lmin > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = max(0, int(lmin + 0.5))
else:
i1 = self.wave.pixel(lmin, nearest=False, unit=unit)
if i1 > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = self.wave.pixel(lmin, nearest=True, unit=unit)
# Get the pixel index that corresponds to the maximum wavelength.
if lmax is None:
i2 = self.shape[0]
else:
if unit is None:
if lmax < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = min(self.shape[0], int(lmax + 0.5))
else:
i2 = self.wave.pixel(lmax, nearest=False, unit=unit)
if i2 < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = self.wave.pixel(lmax, nearest=True, unit=unit) + 1
return slice(i1, i2)
def _interp(self, wavelengths, spline=False):
"""return the interpolated values corresponding to the wavelength
array.
Parameters
----------
wavelengths : array of float
wavelength values
unit : `astropy.units.Unit`
Type of the wavelength coordinates
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
lbda = self.wave.coord()
data = np.pad(self.data.compressed(), 1, 'edge')
w = np.concatenate(([self.get_start() - 0.5 * self.get_step()],
np.compress(~self._mask, lbda),
[self.get_end() + 0.5 * self.get_step()]))
if spline:
if self._var is not None:
_weight = 1. / np.sqrt(np.abs(self.var.filled(np.inf)))
if self.mask is np.ma.nomask:
                    weight = np.empty(self.shape[0] + 2, dtype=float)
weight[1:-1] = _weight
else:
ksel = np.where(self.mask == False)
weight = np.empty(np.shape(ksel)[1] + 2)
weight[1:-1] = _weight[ksel]
weight[0] = weight[1]
weight[-1] = weight[-2]
else:
weight = None
tck = interpolate.splrep(w, data, w=weight)
return interpolate.splev(wavelengths, tck, der=0)
else:
f = interpolate.interp1d(w, data)
return f(wavelengths)
def _interp_data(self, spline=False):
"""Return data array with interpolated values for masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
if np.count_nonzero(self._mask) in (0, self.shape[0]):
return self._data
lbda = self.wave.coord()
wnew = lbda[self._mask]
data = self._data.copy()
data[self._mask] = self._interp(wnew, spline)
return data
def interp_mask(self, spline=False):
"""Interpolate masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
self.data = np.ma.masked_invalid(self._interp_data(spline))
def rebin(self, factor, margin='center', inplace=False):
"""Combine neighboring pixels to reduce the size of a spectrum by an
integer factor.
Each output pixel is the mean of n pixels, where n is the
specified reduction factor.
Parameters
----------
factor : int
The integer reduction factor by which the spectrum should
be shrunk.
margin : string in 'center'|'right'|'left'|'origin'
When the dimension of the input spectrum is not an integer
multiple of the reduction factor, the spectrum is
truncated to remove just enough pixels that its length is
a multiple of the reduction factor. This sub-spectrum is
then rebinned in place of the original spectrum. The
margin parameter determines which pixels of the input
spectrum are truncated, and which remain.
The options are:
            'origin' or 'left':
The start of the output spectrum is coincident
with the start of the input spectrum.
'center':
The center of the output spectrum is aligned
with the center of the input spectrum, within
one pixel.
'right':
The end of the output spectrum is coincident
with the end of the input spectrum.
inplace : bool
If False, return a rebinned copy of the spectrum (the default).
If True, rebin the original spectrum in-place, and return that.
Returns
-------
out : Spectrum
"""
# Delegate the rebinning to the generic DataArray function.
return self._rebin(factor, margin, inplace)
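    # A minimal usage sketch (not from the original source; 'sp' is a
    # hypothetical 100-pixel Spectrum): rebinning by a factor of 3 truncates
    # the spectrum to 99 pixels and averages them in groups of 3, e.g.
    #     sp2 = sp.rebin(3, margin='center')   # sp2.shape[0] == 33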
def _decimation_filter(self, newstep, atten, unit=None):
"""This is a private function Spectrum.resample(), used to apply
a decimation filter prior to resampling.
Parameters
----------
step : float
The new pixel size along the wavelength axis of the spectrum.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. A good value
to choose is 40dB, which produces a response that is very
similar to a blackman filter applied within the Fourier
plane, but with less ringing in the image plane.
unit : `astropy.units.Unit`
The wavelength units of the step argument. A value of None
is equivalent to specifying self.wave.unit.
"""
# Convert the attenuation from dB to a linear scale factor.
gcut = 10.0**(-atten / 20.0)
# Calculate the Nyquist folding frequency of the new pixel size.
nyquist_folding_freq = 0.5 / newstep
# Calculate the standard deviation of a Gaussian whose Fourier
# transform drops from unity at the center to gcut at the Nyquist
# folding frequency.
sigma = (0.5 / np.pi / nyquist_folding_freq *
np.sqrt(-2.0 * np.log(gcut)))
# Convert the standard deviation from wavelength units to input pixels.
sigma /= self.get_step(unit=unit)
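        # Worked example (illustrative): with the default atten=40 dB,
        # gcut = 0.01 and sqrt(-2*ln(0.01)) ~= 3.03, so sigma evaluates to
        # about 3.03 * newstep / pi ~= 0.97 * newstep in wavelength units,
        # before the division by get_step() above expresses it in input pixels.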
# Choose dimensions for the gaussian filtering kernel. Choose an
# extent from -4*sigma to +4*sigma. This truncates the gaussian
# where it drops to about 3e-4 of its peak. The following
# calculation ensures that the dimensions of the array are odd, so
# that the gaussian will be symmetrically sampled either side of a
# central pixel. This prevents spectral shifts.
gshape = int(np.ceil(4.0 * sigma)) * 2 + 1
# fftconvolve requires that the kernel be no larger than the array
# that it is convolving, so reduce the size of the kernel array if
# needed. Be careful to choose an odd sized array.
n = self.shape[0]
if gshape > n:
gshape = n if n % 2 != 0 else (n - 1)
# Sample the gaussian filter symmetrically around the central pixel.
gx = np.arange(gshape, dtype=float) - gshape // 2
gy = np.exp(-0.5 * (gx / sigma)**2)
# Area-normalize the gaussian profile.
gy /= gy.sum()
# Filter the spectrum with the gaussian filter.
self.fftconvolve(gy, inplace=True)
def resample(self, step, start=None, shape=None, unit=u.angstrom,
inplace=False, atten=40.0, cutoff=0.25):
"""Resample a spectrum to have a different wavelength interval.
Parameters
----------
step : float
The new pixel size along the wavelength axis of the spectrum.
start : float
The wavelength at the center of the first pixel of the resampled
spectrum. If None (the default) the center of the first pixel
has the same wavelength before and after resampling.
unit : `astropy.units.Unit`
The wavelength units of the step and start arguments.
The default is u.angstrom.
shape : int
The dimension of the array of the new spectrum (ie. the number
of spectral pixels). If this is not specified, the shape is
selected to encompass the wavelength range from the chosen
start wavelength to the ending wavelength of the input spectrum.
inplace : bool
If False, return a resampled copy of the spectrum (the default).
If True, resample the original spectrum in-place, and return that.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. The default
attenuation is 40.0 dB. To disable antialiasing, specify
atten=0.0.
cutoff : float
Mask each output pixel of which at least this fraction of the
pixel was interpolated from masked input pixels.
Returns
-------
out : Spectrum
"""
out = self if inplace else self.copy()
# Don't allow the spectrum to be started beyond the far end of
# the spectrum, because this would result in an empty spectrum.
if start is not None and start > self.get_end(unit):
raise ValueError('The start value is past the end of the '
'spectrum range')
# Get wavelength world coordinates of the output spectrum.
newwave = self.wave.resample(step, start, unit)
# How many pixels should there be in the resampled spectrum?
# If the user didn't specify this, use newwave.shape, which
# holds the number of pixels of size 'step' needed to sample
# from 'start' to the end of the current wavelength range.
if shape is not None:
newwave.shape = shape
# Get the existing wavelength step size in the new units.
oldstep = self.wave.get_step(unit)
# If the spectrum is being resampled to a larger pixel size,
# then a decimation filter should be applied before
# resampling, to ensure that the new pixel size doesn't
# undersample rapidly changing features in the spectrum.
if step > oldstep and atten > 0.0:
out._decimation_filter(step, atten, unit=unit)
# Get the data, mask (and variance) arrays, and replace bad pixels with
# zeros.
if out._mask is not None: # Is out.data a masked array?
data = out.data.filled(0.0)
if out._var is not None:
var = out.var.filled(0.0)
else:
var = None
mask = out._mask
else: # Is out.data just a numpy array?
mask = ~np.isfinite(out._data)
data = out._data.copy()
data[mask] = 0.0
if out.var is not None:
var = out.var.copy()
var[mask] = 0.0
else:
var = None
# Get the coordinates of the pixels of the input and output spectra.
xi = self.wave.coord()
xo = newwave.coord()
# Get a resampled versions of the data array, optionally the variance
# array, and a floating point version of the mask array. Note that the
# choice of linear interpolation is required to preserve flux.
data = interpolate.griddata(xi, data, xo, method="linear",
fill_value=np.nan)
if var is not None:
var = interpolate.griddata(xi, var, xo, method="linear",
fill_value=np.nan)
mask = interpolate.griddata(xi, mask.astype(float), xo,
method="linear", fill_value=1.0)
# Create a new boolean mask in which all pixels that had an integrated
# contribution of more than 'cutoff' originally masked pixels are
# masked. Note that setting the cutoff to the "obvious" value of zero
# results in lots of pixels being masked that are far away from any
# masked pixels, due to precision errors in the griddata()
# function. Limit the minimum value of the cutoff to avoid this.
mask = np.greater(mask, max(cutoff, 1.0e-6))
# If masked arrays were not in use in the original spectrum, fill
# bad pixels with NaNs.
if out._mask is None:
data[mask] = np.nan
if var is not None:
var[mask] = np.nan
mask = None
# Install the resampled arrays.
out._data = data
out._var = var
out._mask = mask
# Install the new wavelength world coordinates.
out.wave = newwave
# When up-sampling, decimation filter the output spectrum. The
# combination of this and the linear interpolation of the preceding
# griddata() produces a much better interpolation than a cubic spline
# filter can. In particular, a spline interpolation does not conserve
# flux, whereas linear interpolation plus decimation filtering does.
if step < oldstep and atten > 0.0:
out._decimation_filter(step, atten, unit=unit)
return out
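    # A minimal usage sketch (not from the original source; 'sp' is a
    # hypothetical Spectrum sampled at 1.25 Angstrom/pixel):
    #     coarse = sp.resample(2.5, unit=u.angstrom)    # filter applied before resampling
    #     fine = sp.resample(0.625, unit=u.angstrom)    # filter applied after resampling
    # as implemented by the two _decimation_filter() calls above.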
def mean(self, lmin=None, lmax=None, weight=True, unit=u.angstrom):
"""Compute the mean flux over a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
weight : bool
If weight is True, compute the weighted mean, inversely
weighting each pixel by its variance.
Returns
-------
out : (float, float)
The mean flux and its error.
"""
# Don't attempt to perform a weighted mean if there are no variances.
if self._var is None:
weight = False
# Get the slice that selects the specified wavelength range.
try:
lambda_slice = self._wavelengths_to_slice(lmin, lmax, unit)
except ValueError:
return (0.0, np.inf)
# Obtain the mean flux of the sub-spectrum.
if weight:
weights = 1.0 / self.var[lambda_slice].filled(np.inf)
flux, wsum = np.ma.average(self.data[lambda_slice],
weights=weights, returned=True)
if self.var is not None:
err_flux = np.sqrt(
np.ma.sum(self.var[lambda_slice] * weights**2) / wsum**2)
else:
err_flux = np.inf
else:
flux, wsum = np.ma.average(self.data[lambda_slice], returned=True)
if self.var is not None:
                err_flux = np.sqrt(np.ma.sum(self.var[lambda_slice]) / wsum**2)
else:
err_flux = np.inf
return (flux, err_flux)
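    # A minimal usage sketch (hypothetical wavelength range; 'sp' is a Spectrum):
    #     flux, err = sp.mean(lmin=6550., lmax=6600., unit=u.angstrom)
    # With weight=True and variances available, each pixel is weighted by the
    # inverse of its variance, as in the branch above; otherwise a plain
    # average is returned.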
def sum(self, lmin=None, lmax=None, weight=True, unit=u.angstrom):
"""Obtain the sum of the fluxes within a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
weight : bool
If weight is True, compute the weighted sum, inversely
weighting each pixel by its variance.
Returns
-------
out : float, float
The total flux and its error.
"""
# Get the slice that selects the specified wavelength range.
try:
lambda_slice = self._wavelengths_to_slice(lmin, lmax, unit)
except ValueError:
return (0.0, np.inf)
# Perform a weighted sum?
if weight and self._var is not None:
weights = 1.0 / self.var[lambda_slice].filled(np.inf)
# How many unmasked pixels will be averaged?
nsum = np.ma.count(self.data[lambda_slice])
fmean, wsum = np.ma.average(self.data[lambda_slice],
weights=weights, returned=True)
# The weighted average multiplied by the number of unmasked pixels.
flux = fmean * nsum
if self.var is not None:
err_flux = np.sqrt(
np.ma.sum(self.var[lambda_slice] * weights**2) /
wsum**2 * nsum**2)
else:
err_flux = np.inf
else:
flux = self.data[lambda_slice].sum()
if self.var is not None:
err_flux = np.sqrt(np.ma.sum(self.var[lambda_slice]))
else:
err_flux = np.inf
return (flux, err_flux)
def integrate(self, lmin=None, lmax=None, unit=u.angstrom):
"""Integrate the flux over a specified wavelength range.
The units of the integrated flux depend on the flux units of
the spectrum and the wavelength units, as follows:
If the flux units of the spectrum, self.unit, are something
like Q per angstrom, Q per nm, or Q per um, then the
integrated flux will have the units of Q. For example, if the
fluxes have units of 1e-20 erg/cm2/Angstrom/s, then the units
of the integration will be 1e-20 erg/cm2/s.
Alternatively, if unit is not None, then the unit of the
returned number will be the product of the units in self.unit
and unit. For example, if the flux units are counts/s, and
unit=u.angstrom, then the integrated flux will have units
counts*Angstrom/s.
Finally, if unit is None, then the units of the returned
number will be the product of self.unit and the units of the
wavelength axis of the spectrum (ie. self.wave.unit).
The result of the integration is returned as an astropy
Quantity, which holds the integrated value and its physical
units. The units of the returned number can be determined
from the .unit attribute of the return value. Alternatively
the returned value can be converted to another unit, using the
to() method of astropy quantities.
Parameters
----------
lmin : float
The minimum wavelength of the range to be integrated,
or None (the default), to select the minimum wavelength
of the first pixel of the spectrum. If this is below the
minimum wavelength of the spectrum, the integration
behaves as though the flux in the first pixel extended
down to that wavelength.
If the unit argument is None, lmin is a pixel index, and
the wavelength of the center of this pixel is used as the
lower wavelength of the integration.
lmax : float
The maximum wavelength of the range to be integrated,
or None (the default), to select the maximum wavelength
of the last pixel of the spectrum. If this is above the
maximum wavelength of the spectrum, the integration
behaves as though the flux in the last pixel extended
up to that wavelength.
If the unit argument is None, lmax is a pixel index, and
the wavelength of the center of this pixel is used as the
upper wavelength of the integration.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax, or None to indicate
that lmin and lmax are pixel indexes.
Returns
-------
out : `astropy.units.Quantity`, `astropy.units.Quantity`
The result of the integration and its error, expressed as
a floating point number with accompanying units. The integrated
value and its physical units can be extracted using the .value and
.unit attributes of the returned quantity. The value can also be
converted to different units, using the .to() method of the
returned objected.
"""
# Get the index of the first pixel within the wavelength range,
# and the minimum wavelength of the integration.
if lmin is None:
i1 = 0
lmin = self.wave.coord(-0.5, unit=unit)
else:
if unit is None:
l1 = lmin
lmin = self.wave.coord(max(-0.5, l1))
else:
l1 = self.wave.pixel(lmin, False, unit)
i1 = max(0, int(l1))
# Get the index of the last pixel within the wavelength range, plus
# 1, and the maximum wavelength of the integration.
if lmax is None:
i2 = self.shape[0]
lmax = self.wave.coord(i2 - 0.5, unit=unit)
else:
if unit is None:
l2 = lmax
lmax = self.wave.coord(min(self.shape[0] - 0.5, l2))
else:
l2 = self.wave.pixel(lmax, False, unit)
i2 = min(self.shape[0], int(l2) + 1)
# Get the lower wavelength of each pixel, including one extra
# pixel at the end of the range.
d = self.wave.coord(-0.5 + np.arange(i1, i2 + 1), unit=unit)
# Change the wavelengths of the first and last pixels to
# truncate or extend those pixels to the starting and ending
# wavelengths of the spectrum.
d[0] = lmin
d[-1] = lmax
if unit is None:
unit = self.wave.unit
# Get the data of the subspectrum covered by the integration.
data = self.data[i1:i2]
# If the spectrum has been calibrated, the flux units will be
# per angstrom, per nm, per um etc. If these wavelength units
# don't match the units of the wavelength axis of the
# integration, then although the results will be correct, they
# will have inconvenient units. In such cases attempt to
# convert the units of the wavelength axis to match the flux
# units.
if unit in self.unit.bases: # The wavelength units already agree.
out_unit = self.unit * unit
else:
try:
# Attempt to determine the wavelength units of the flux density
wunit = (set(self.unit.bases) &
set([u.pm, u.angstrom, u.nm, u.um])).pop()
# Scale the wavelength axis to have the same wavelength units.
d *= unit.to(wunit)
# Get the final units of the integration.
out_unit = self.unit * wunit
# If the wavelength units of the flux weren't recognized,
# simply return the units unchanged.
except Exception:
out_unit = self.unit * unit
# Integrate the spectrum by multiplying the value of each pixel
# by the difference in wavelength from the start of that pixel to
# the start of the next pixel.
flux = (data * np.diff(d)).sum() * out_unit
if self.var is None:
err_flux = np.inf
else:
err_flux = np.sqrt((self.var[i1:i2] * np.diff(d)**2).sum())
return (flux, err_flux * out_unit)
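    # A minimal usage sketch (hypothetical values): for a spectrum whose flux
    # unit is 1e-20 erg/s/cm2/Angstrom,
    #     flux, err = sp.integrate(lmin=6550., lmax=6600., unit=u.angstrom)
    # returns astropy Quantities in 1e-20 erg/s/cm2, because the per-Angstrom
    # flux unit cancels against the Angstrom wavelength unit as described above.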
def poly_fit(self, deg, weight=True, maxiter=0,
nsig=(-3.0, 3.0), verbose=False):
"""Perform polynomial fit on normalized spectrum and returns polynomial
coefficients.
Parameters
----------
deg : int
Polynomial degree.
weight : bool
If weight is True, the weight is computed as the inverse of
variance.
maxiter : int
Maximum allowed iterations (0)
nsig : (float,float)
The low and high rejection factor in std units (-3.0,3.0)
Returns
-------
        out : ndarray
Polynomial coefficients ordered from low to high.
"""
if self.shape[0] <= deg + 1:
raise ValueError('Too few points to perform polynomial fit')
if self._var is None:
weight = False
if weight:
vec_weight = 1.0 / np.sqrt(np.abs(self.var.filled(np.inf)))
else:
vec_weight = None
if self._mask is np.ma.nomask:
d = self._data
w = self.wave.coord()
else:
mask = ~self._mask
d = self._data[mask]
w = self.wave.coord()[mask]
if weight:
vec_weight = vec_weight[mask]
# normalize w
w0 = np.min(w)
dw = np.max(w) - w0
w = (w - w0) / dw
p = np.polynomial.polynomial.polyfit(w, d, deg, w=vec_weight)
if maxiter > 0:
err = d - np.polynomial.polynomial.polyval(w, p)
sig = np.std(err)
n_p = len(d)
for it in range(maxiter):
ind = np.where((err >= nsig[0] * sig) &
(np.abs(err) <= nsig[1] * sig))
if len(ind[0]) == n_p:
break
if len(ind[0]) <= deg + 1:
raise ValueError('Too few points to perform '
'polynomial fit')
if vec_weight is not None:
vec_weight = vec_weight[ind]
p = np.polynomial.polynomial.polyfit(w[ind], d[ind],
deg, w=vec_weight)
err = d[ind] - np.polynomial.polynomial.polyval(w[ind], p)
sig = np.std(err)
n_p = len(ind[0])
if verbose:
self._logger.info('Number of iteration: %d Std: %10.4e '
'Np: %d Frac: %4.2f', it + 1, sig, n_p,
100. * n_p / self.shape[0])
return p
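    # A minimal usage sketch (hypothetical degree and clipping settings):
    #     coeffs = sp.poly_fit(3, maxiter=5)       # iterative sigma-clipping
    #     continuum = sp.poly_spec(3, maxiter=5)   # same fit, returned as a Spectrum
    # Note that the coefficients refer to wavelengths normalized to [0, 1],
    # exactly as poly_val() below assumes.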
def poly_val(self, z):
"""Update in place the spectrum data from polynomial coefficients.
Uses `numpy.poly1d`.
Parameters
----------
z : array
The polynomial coefficients, in increasing powers:
data = z0 + z1(lbda-min(lbda))/(max(lbda)-min(lbda)) + ...
+ zn ((lbda-min(lbda))/(max(lbda)-min(lbda)))**n
"""
l = self.wave.coord()
w0 = np.min(l)
dw = np.max(l) - w0
w = (l - w0) / dw
self._data = np.polynomial.polynomial.polyval(w, z)
if self._mask is not np.ma.nomask:
self._mask = ~(np.isfinite(self._data))
self._var = None
def poly_spec(self, deg, weight=True, maxiter=0,
nsig=(-3.0, 3.0), verbose=False):
"""Return a spectrum containing a polynomial fit.
Parameters
----------
deg : int
Polynomial degree.
weight : bool
If weight is True, the weight is computed as the inverse of
variance.
maxiter : int
Maximum allowed iterations (0)
nsig : (float,float)
The low and high rejection factor in std units (-3.0,3.0)
Returns
-------
out : Spectrum
"""
z = self.poly_fit(deg, weight, maxiter, nsig, verbose)
res = self.clone()
res.poly_val(z)
return res
def abmag_band(self, lbda, dlbda):
"""Compute AB magnitude corresponding to the wavelength band.
Parameters
----------
lbda : float
Mean wavelength in Angstrom.
dlbda : float
Width of the wavelength band in Angstrom.
Returns
-------
out : float, float
Magnitude value and its error
"""
vflux, err_flux = self.mean(lmin=lbda - dlbda / 2.0,
lmax=lbda + dlbda / 2.0,
weight=None, unit=u.angstrom)
if vflux == 0:
return (99, 0)
else:
unit = u.Unit('erg.s-1.cm-2.Angstrom-1')
vflux2 = (vflux * self.unit).to(unit)
err_flux2 = (err_flux * self.unit).to(unit)
return flux2mag(vflux2.value, err_flux2.value, lbda)
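    # For reference (standard AB-magnitude relations, not extracted from
    # flux2mag itself, which is defined elsewhere in the package):
    #     m_AB = -2.5 * log10(f_nu) - 48.60,  with f_nu in erg/s/cm2/Hz,
    # and f_nu = f_lambda * lambda**2 / c for a mean flux density f_lambda
    # in erg/s/cm2/Angstrom at the band's mean wavelength lambda.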
def abmag_filter_name(self, name):
"""Compute AB magnitude using the filter name.
Parameters
----------
name : string
'U', 'B', 'V', 'Rc', 'Ic', 'z', 'R-Johnson',
'F606W', 'F775W', 'F814W', 'F850LP'
Returns
-------
out : float, float
Magnitude value and its error
"""
if name == 'U':
return self.abmag_band(3663., 650.)
elif name == 'B':
return self.abmag_band(4361., 890.)
elif name == 'V':
return self.abmag_band(5448., 840.)
elif name == 'Rc':
return self.abmag_band(6410., 1600.)
elif name == 'Ic':
return self.abmag_band(7980., 1500.)
elif name == 'z':
return self.abmag_band(8930., 1470.)
elif name == 'R-Johnson':
(l0, lmin, lmax, tck) = ABmag_filters.mag_RJohnson()
return self._filter(l0, lmin, lmax, tck)
else:
FILTERS = join(abspath(dirname(__file__)), 'filters',
'filter_list.fits')
filtname = 'ACS_' + name
with fits.open(FILTERS) as hdul:
if filtname not in hdul:
raise ValueError("filter '{}' not found".format(filtname))
lbda = hdul[filtname].data['lambda']
thr = hdul[filtname].data['throughput']
return self.abmag_filter(lbda, thr)
def abmag_filter(self, lbda, eff):
"""Compute AB magnitude using array filter.
Parameters
----------
lbda : float array
Wavelength values in Angstrom.
eff : float array
Efficiency values.
Returns
-------
out : float, float
Magnitude value and its error
"""
lbda = np.asarray(lbda)
eff = np.asarray(eff)
if np.shape(lbda) != np.shape(eff):
            raise TypeError('lbda and eff inputs do not have the same size.')
l0 = np.average(lbda, weights=eff)
lmin = lbda[0]
lmax = lbda[-1]
k = 3 if lbda.shape[0] > 3 else 1
tck = interpolate.splrep(lbda, eff, k=k)
return self._filter(l0, lmin, lmax, tck)
def _filter(self, l0, lmin, lmax, tck):
"""Compute AB magnitude.
Parameters
----------
l0 : float
Mean wavelength in Angstrom.
lmin : float
Minimum wavelength in Angstrom.
lmax : float
Maximum wavelength in Angstrom.
tck : 3-tuple
(t,c,k) contains the spline representation. t = the knot-points,
c = coefficients and k = the order of the spline.
"""
try:
lambda_slice = self._wavelengths_to_slice(lmin, lmax, u.Angstrom)
except ValueError:
raise ValueError('Spectrum outside Filter band')
if lambda_slice.start == (lambda_slice.stop - 1):
raise ValueError('Filter band smaller than spectrum step')
lb = self.wave.coord(np.arange(lambda_slice.start, lambda_slice.stop),
unit=u.Angstrom)
w = interpolate.splev(lb, tck, der=0)
vflux, wsum = np.ma.average(self.data[lambda_slice], weights=w,
returned=True)
if self.var is not None:
err_flux = np.sqrt(np.ma.sum(self.var[lambda_slice] * w**2) /
wsum**2)
else:
err_flux = np.inf
unit = u.Unit('erg.s-1.cm-2.Angstrom-1')
vflux2 = (vflux * self.unit).to(unit)
err_flux2 = (err_flux * self.unit).to(unit)
return flux2mag(vflux2.value, err_flux2.value, l0)
def wavelet_filter(self, levels=9, sigmaCutoff=5.0, epsilon=0.05,
inplace=False):
"""Perform a wavelet filtering on the spectrum in 1 dimension.
Code contributed by <NAME> (EPFL, 2016), and used in
https://arxiv.org/abs/1703.09239 (with funding from ERC Advanced
Grant LIDA, Swiss National Science Foundation, ERC Starting Grant
336736-CALENDS).
Parameters
----------
levels : int
Highest scale level.
sigmaCutoff : float
Cleaning threshold.
By default 5 for a 5 sigma cleaning in wavelet space.
epsilon : float in ]0,1[
Residual criterion used to perform the cleaning
inplace : bool
If False, return a filtered copy of the spectrum (the default).
If True, filter the original spectrum in-place, and return that.
Returns
-------
out : Spectrum
"""
res = self if inplace else self.copy()
deNoisedSignal = wavelet1D.cleanSignal(self._data, np.sqrt(self._var),
levels, sigmaCutoff=sigmaCutoff,
epsilon=epsilon)
res._data = deNoisedSignal
res._var = None
return res
def truncate(self, lmin=None, lmax=None, unit=u.angstrom):
"""Truncate the wavelength range of a spectrum in-place.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
"""
# Get the slice that selects the specified wavelength range.
lambda_slice = self._wavelengths_to_slice(lmin, lmax, unit)
if lambda_slice.start == (lambda_slice.stop - 1):
raise ValueError('Minimum and maximum wavelengths are equal')
res = self[lambda_slice]
self._data = res._data
self._var = res._var
self._mask = res._mask
self.wave = res.wave
def fwhm(self, l0, cont=0, spline=False, unit=u.angstrom):
"""Return the fwhm of a peak.
Parameters
----------
l0 : float
Wavelength value corresponding to the peak position.
unit : `astropy.units.Unit`
Type of the wavelength coordinates. If None, inputs are in pixels.
cont : int
The continuum [default 0].
spline : bool
Linear/spline interpolation to interpolate masked values.
Returns
-------
out : float
"""
if unit is None:
k0 = int(l0 + 0.5)
step = 1
else:
k0 = self.wave.pixel(l0, nearest=True, unit=unit)
step = self.wave.get_step(unit=unit)
d = self._interp_data(spline) - cont
f2 = d[k0] / 2
try:
k2 = np.argwhere(d[k0:-1] < f2)[0][0] + k0
i2 = np.interp(f2, d[k2:k2 - 2:-1], [k2, k2 - 1])
            k1 = k0 - np.argwhere(d[k0::-1] < f2)[0][0]
i1 = np.interp(f2, d[k1:k1 + 2], [k1, k1 + 1])
fwhm = (i2 - i1) * step
return fwhm
except Exception:
try:
k2 = np.argwhere(d[k0:-1] > f2)[0][0] + k0
i2 = np.interp(f2, d[k2:k2 - 2:-1], [k2, k2 - 1])
                k1 = k0 - np.argwhere(d[k0::-1] > f2)[0][0]
i1 = np.interp(f2, d[k1:k1 + 2], [k1, k1 + 1])
fwhm = (i2 - i1) * step
return fwhm
except Exception:
raise ValueError('Error in fwhm estimation')
def gauss_fit(self, lmin, lmax, lpeak=None, flux=None, fwhm=None,
cont=None, peak=False, spline=False, weight=True,
plot=False, plot_factor=10, unit=u.angstrom,
fix_lpeak=False):
"""Perform a Gaussian fit.
Uses `scipy.optimize.leastsq` to minimize the sum of squares.
Parameters
----------
lmin : float or (float,float)
Minimum wavelength value or wavelength range
used to initialize the gaussian left value (in angstrom)
lmax : float or (float,float)
Maximum wavelength or wavelength range
used to initialize the gaussian right value (in angstrom)
lpeak : float
Input gaussian center (in angstrom), if None it is estimated
with the wavelength corresponding to the maximum value
in [max(lmin), min(lmax)]
unit : `astropy.units.Unit`
Type of the wavelength coordinates. If None, inputs are in pixels.
flux : float
Integrated gaussian flux or gaussian peak value if peak is True.
fwhm : float
Input gaussian fwhm (in angstrom), if None it is estimated.
peak : bool
            If true, flux contains the gaussian peak value.
cont : float
Continuum value, if None it is estimated by the line through points
(max(lmin),mean(data[lmin])) and (min(lmax),mean(data[lmax])).
spline : bool
Linear/spline interpolation to interpolate masked values.
weight : bool
If weight is True, the weight is computed as the inverse of
variance.
plot : bool
If True, the Gaussian is plotted.
plot_factor : double
oversampling factor for the overplotted fit
Returns
-------
out : `mpdaf.obj.Gauss1D`
"""
# truncate the spectrum and compute right and left gaussian values
if np.isscalar(lmin):
fmin = None
else:
lmin = np.array(lmin, dtype=float)
fmin = self.mean(lmin[0], lmin[1], unit=unit)[0]
lmin = (lmin[0] + lmin[1]) / 2.
if np.isscalar(lmax):
fmax = None
else:
lmax = np.array(lmax, dtype=float)
fmax = self.mean(lmax[0], lmax[1], unit=unit)[0]
lmax = (lmax[0] + lmax[1]) / 2.
spec = self.subspec(lmin, lmax, unit=unit)
data = spec._interp_data(spline)
if unit is None:
            l = np.arange(spec.shape[0], dtype=float)
else:
l = spec.wave.coord(unit=unit)
lmin = l[0]
lmax = l[-1]
if fmin is None:
fmin = data[0]
if fmax is None:
fmax = data[-1]
# initial gaussian peak position
if lpeak is None:
lpeak = l[data.argmax()]
# continuum value
if cont is None:
cont0 = ((fmax - fmin) * lpeak + lmax *
fmin - lmin * fmax) / (lmax - lmin)
else:
cont0 = cont
# initial sigma value
if fwhm is None:
try:
fwhm = spec.fwhm(lpeak, cont0, spline, unit=unit)
except Exception:
lpeak2 = l[data.argmin()]
fwhm = spec.fwhm(lpeak2, cont0, spline, unit=unit)
sigma = fwhm * gaussian_fwhm_to_sigma
# initial gaussian integrated flux
if flux is None:
if unit is None:
pixel = int(lpeak + 0.5)
else:
pixel = spec.wave.pixel(lpeak, nearest=True, unit=unit)
peak = data[pixel] - cont0
flux = peak * np.sqrt(2 * np.pi * (sigma ** 2))
elif peak is True:
peak = flux - cont0
flux = peak * np.sqrt(2 * np.pi * (sigma ** 2))
else:
pass
# 1d gaussian function: p = (ampl, sigma, center)
if fix_lpeak:
if cont is None:
gaussfit = lambda p, x: \
((fmax - fmin) * x + lmax * fmin - lmin * fmax) \
/ (lmax - lmin) + np.abs(p[0]) \
* (1 / np.sqrt(2 * np.pi * (p[1] ** 2))) \
* np.exp(-(x - lpeak) ** 2 / (2 * p[1] ** 2))
else:
gaussfit = lambda p, x: \
cont + p[0] * (1 / np.sqrt(2 * np.pi * (p[1] ** 2))) \
* np.exp(-(x - lpeak) ** 2 / (2 * p[1] ** 2))
else:
if cont is None:
gaussfit = lambda p, x: \
((fmax - fmin) * x + lmax * fmin - lmin * fmax) \
/ (lmax - lmin) + p[0] \
* (1 / np.sqrt(2 * np.pi * (p[1] ** 2))) \
* np.exp(-(x - p[2]) ** 2 / (2 * p[1] ** 2))
else:
gaussfit = lambda p, x: \
cont + p[0] * (1 / np.sqrt(2 * np.pi * (p[1] ** 2))) \
* np.exp(-(x - p[2]) ** 2 / (2 * p[1] ** 2))
if spec.var is not None and weight:
wght = 1.0 / np.sqrt(np.abs(spec.var))
np.ma.fix_invalid(wght, copy=False, fill_value=0)
else:
wght = np.ones(spec.shape)
        # initial guesses for the Gaussian fit
v0 = [flux, sigma]
if not fix_lpeak:
v0.append(lpeak)
# Minimize the sum of squares
e_gauss_fit = lambda p, x, y, w: w * (gaussfit(p, x) - y)
v, covar, info, mesg, success = leastsq(e_gauss_fit, v0[:],
args=(l, data, wght),
maxfev=100000, full_output=1)
# calculate the errors from the estimated covariance matrix
chisq = sum(info["fvec"] * info["fvec"])
dof = len(info["fvec"]) - len(v)
if covar is not None:
err = np.array([np.sqrt(np.abs(covar[i, i])) *
np.sqrt(np.abs(chisq / dof))
for i in range(len(v))])
else:
err = [np.nan] * len(v)
if plot:
import matplotlib.pyplot as plt
xxx = np.arange(l[0], l[-1], (l[1] - l[0]) / plot_factor)
ccc = gaussfit(v, xxx)
plt.plot(xxx, ccc, 'r--')
# return a Gauss1D object
v = list(v)
err = list(err)
if not fix_lpeak:
lpeak = v.pop()
err_lpeak = err.pop()
else:
err_lpeak = 0
flux, sigma = v[0], np.abs(v[1])
fwhm = sigma * gaussian_sigma_to_fwhm
peak = flux / np.sqrt(2 * np.pi * (sigma ** 2))
err_flux, err_sigma = err
err_fwhm = err_sigma * gaussian_sigma_to_fwhm
err_peak = np.abs(1. / np.sqrt(2 * np.pi) *
(err_flux * sigma - flux * err_sigma) /
sigma / sigma)
return Gauss1D(lpeak, peak, flux, fwhm, cont0, err_lpeak,
err_peak, err_flux, err_fwhm, chisq, dof)
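    # A minimal usage sketch (hypothetical emission line near 6563 Angstrom;
    # attribute names assumed to mirror the Gauss1D constructor call above):
    #     g = sp.gauss_fit(6530., 6600., unit=u.angstrom)
    #     print(g.lpeak, g.flux, g.fwhm, g.cont)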
def add_gaussian(self, lpeak, flux, fwhm, cont=0, peak=False,
unit=u.angstrom):
"""Add a gaussian on spectrum in place.
Parameters
----------
lpeak : float
Gaussian center.
flux : float
Integrated gaussian flux or gaussian peak value if peak is True.
fwhm : float
Gaussian fwhm.
cont : float
Continuum value.
peak : bool
If true, flux contains the gaussian peak value
unit : `astropy.units.Unit`
Type of the wavelength coordinates. If None, inputs are in pixels.
"""
gauss = lambda p, x: cont \
+ p[0] * (1 / np.sqrt(2 * np.pi * (p[2] ** 2))) \
* np.exp(-(x - p[1]) ** 2 / (2 * p[2] ** 2))
sigma = fwhm * gaussian_fwhm_to_sigma
if peak is True:
flux = flux * np.sqrt(2 * np.pi * (sigma ** 2))
lmin = lpeak - 5 * sigma
lmax = lpeak + 5 * sigma
if unit is None:
imin = int(lmin + 0.5)
imax = int(lmax + 0.5)
else:
imin = self.wave.pixel(lmin, nearest=True, unit=unit)
imax = self.wave.pixel(lmax, nearest=True, unit=unit)
if imin == imax:
if imin == 0 or imin == self.shape[0]:
raise ValueError('Gaussian outside spectrum wavelength range')
if unit is None:
wave = np.arange(imin, imax, dtype=float)
else:
wave = self.wave.coord(unit=unit)[imin:imax]
v = [flux, lpeak, sigma]
self.data[imin:imax] += gauss(v, wave)
def gauss_dfit(self, lmin, lmax, wratio, lpeak_1=None,
flux_1=None, fratio=1., fwhm=None, cont=None,
peak=False, spline=False, weight=True,
plot=False, plot_factor=10, unit=u.angstrom):
"""Truncate the spectrum and fit it as a sum of two gaussian functions.
Returns the two gaussian functions as `mpdaf.obj.Gauss1D` objects.
From <NAME> and <NAME>.
Parameters
----------
lmin : float or (float,float)
Minimum wavelength value or wavelength range
used to initialize the gaussian left value.
lmax : float or (float,float)
Maximum wavelength or wavelength range
used to initialize the gaussian right value.
wratio : float
Ratio between the two gaussian centers
lpeak_1 : float
Input gaussian center of the first gaussian. if None it is
estimated with the wavelength corresponding to the maximum value
in [max(lmin), min(lmax)]
flux_1 : float
Integrated gaussian flux or gaussian peak value if peak is True.
fratio : float
Ratio between the two integrated gaussian fluxes.
fwhm : float
Input gaussian fwhm, if None it is estimated.
peak : bool
            If true, flux contains the gaussian peak value.
cont : float
Continuum value, if None it is estimated by the line through points
(max(lmin),mean(data[lmin])) and (min(lmax),mean(data[lmax])).
spline : bool
Linear/spline interpolation to interpolate masked values.
weight : bool
If weight is True, the weight is computed as the inverse of
variance.
plot : bool
            If True, the resulting fit is plotted.
plot_factor : double
oversampling factor for the overplotted fit
unit : `astropy.units.Unit`
Type of the wavelength coordinates. If None, inputs are in pixels.
Returns
-------
out : `mpdaf.obj.Gauss1D`, `mpdaf.obj.Gauss1D`
"""
if np.isscalar(lmin):
fmin = None
else:
lmin = np.array(lmin, dtype=float)
fmin = self.mean(lmin[0], lmin[1], weight=False, unit=unit)[0]
lmin = lmin[1]
if np.isscalar(lmax):
fmax = None
else:
lmax = np.array(lmax, dtype=float)
fmax = self.mean(lmax[0], lmax[1], weight=False, unit=unit)[0]
lmax = lmax[0]
# spec = self.truncate(lmin, lmax)
spec = self.subspec(lmin, lmax, unit=unit)
data = spec._interp_data(spline)
if unit is None:
            l = np.arange(spec.shape[0], dtype=float)
else:
l = spec.wave.coord(unit=unit)
lmin = l[0]
lmax = l[-1]
if fmin is None:
fmin = data[0]
if fmax is None:
fmax = data[-1]
# initial gaussian peak position
if lpeak_1 is None:
lpeak_1 = l[data.argmax()]
# continuum value
if cont is None:
cont0 = ((fmax - fmin) * lpeak_1 + lmax *
fmin - lmin * fmax) / (lmax - lmin)
else:
cont0 = cont
# initial sigma value
if fwhm is None:
try:
fwhm = spec.fwhm(lpeak_1, cont0, spline, unit=unit)
except Exception:
lpeak_1 = l[data.argmin()]
fwhm = spec.fwhm(lpeak_1, cont0, spline, unit=unit)
sigma = fwhm * gaussian_fwhm_to_sigma
# initial gaussian integrated flux
if flux_1 is None:
if unit is None:
pixel = int(lpeak_1 + 0.5)
else:
pixel = spec.wave.pixel(lpeak_1, nearest=True, unit=unit)
peak_1 = data[pixel] - cont0
flux_1 = peak_1 * np.sqrt(2 * np.pi * (sigma ** 2))
elif peak is True:
peak_1 = flux_1 - cont0
flux_1 = peak_1 * np.sqrt(2 * np.pi * (sigma ** 2))
else:
pass
flux_2 = fratio * flux_1
# 1d gaussian function
# p[0]: flux 1, p[1]:center 1, p[2]: fwhm, p[3] = peak 2
gaussfit = lambda p, x: cont0 + \
p[0] * (1 / np.sqrt(2 * np.pi * (p[2] ** 2))) * \
np.exp(-(x - p[1]) ** 2 / (2 * p[2] ** 2)) + \
p[3] * (1 / np.sqrt(2 * np.pi * (p[2] ** 2))) * \
np.exp(-(x - (p[1] * wratio)) ** 2 / (2 * p[2] ** 2))
# 1d gaussian fit
if spec.var is not None and weight:
wght = 1.0 / np.sqrt(np.abs(spec.var))
np.ma.fix_invalid(wght, copy=False, fill_value=0)
else:
wght = np.ones(spec.shape)
e_gauss_fit = lambda p, x, y, w: w * (gaussfit(p, x) - y)
        # initial guesses for the Gaussian fit
v0 = [flux_1, lpeak_1, sigma, flux_2]
# Minimize the sum of squares
v, covar, info, mesg, success = leastsq(
e_gauss_fit, v0[:], args=(l, data, wght), maxfev=100000,
full_output=1)
# calculate the errors from the estimated covariance matrix
chisq = sum(info["fvec"] * info["fvec"])
dof = len(info["fvec"]) - len(v)
if covar is not None:
err = np.array([np.sqrt(np.abs(covar[i, i])) *
np.sqrt(np.abs(chisq / dof))
for i in range(len(v))])
else:
err = None
if plot:
import matplotlib.pyplot as plt
xxx = np.arange(l[0], l[-1], (l[1] - l[0]) / plot_factor)
ccc = gaussfit(v, xxx)
plt.plot(xxx, ccc, 'r--')
# return a Gauss1D object
flux_1 = v[0]
flux_2 = v[3]
lpeak_1 = v[1]
lpeak_2 = lpeak_1 * wratio
sigma = np.abs(v[2])
fwhm = sigma * gaussian_sigma_to_fwhm
peak_1 = flux_1 / np.sqrt(2 * np.pi * (sigma ** 2))
peak_2 = flux_2 / np.sqrt(2 * np.pi * (sigma ** 2))
if err is not None:
err_flux_1 = err[0]
err_flux_2 = err[3]
err_lpeak_1 = err[1]
err_lpeak_2 = err[1] * wratio
err_sigma = err[2]
err_fwhm = err_sigma * gaussian_sigma_to_fwhm
err_peak_1 = np.abs(1. / np.sqrt(2 * np.pi) *
(err_flux_1 * sigma - flux_1 * err_sigma) /
sigma / sigma)
err_peak_2 = np.abs(1. / np.sqrt(2 * np.pi) *
(err_flux_2 * sigma - flux_2 * err_sigma) /
sigma / sigma)
else:
err_flux_1 = np.NAN
err_flux_2 = np.NAN
err_lpeak_1 = np.NAN
err_lpeak_2 = np.NAN
err_sigma = np.NAN
err_fwhm = np.NAN
err_peak_1 = np.NAN
err_peak_2 = np.NAN
return (Gauss1D(lpeak_1, peak_1, flux_1, fwhm, cont0, err_lpeak_1,
err_peak_1, err_flux_1, err_fwhm, chisq, dof),
Gauss1D(lpeak_2, peak_2, flux_2, fwhm, cont0, err_lpeak_2,
err_peak_2, err_flux_2, err_fwhm, chisq, dof))
def gauss_asymfit(self, lmin, lmax, lpeak=None, flux=None, fwhm=None,
cont=None, peak=False, spline=False, weight=True,
plot=False, plot_factor=10, ax=None, unit=u.angstrom):
"""Truncate the spectrum and fit it with an asymetric gaussian
function.
Returns the two gaussian functions (right and left) as
`mpdaf.obj.Gauss1D` objects.
From <NAME> and <NAME>, modified by <NAME>.
Parameters
----------
lmin : float or (float,float)
Minimum wavelength value or wavelength range
used to initialize the gaussian left value.
lmax : float or (float,float)
Maximum wavelength or wavelength range
used to initialize the gaussian right value.
lpeak : float
            Input gaussian center. If None it is estimated with the wavelength
corresponding to the maximum value in ``[max(lmin), min(lmax)]``.
flux : float
Integrated gaussian flux or gaussian peak value if peak is True.
fwhm : float
Input gaussian fwhm, if None it is estimated.
peak : bool
            If true, flux contains the gaussian peak value.
cont : float
Continuum value, if None it is estimated by the line through points
(max(lmin),mean(data[lmin])) and (min(lmax),mean(data[lmax])).
spline : bool
Linear/spline interpolation to interpolate masked values.
weight : bool
If weight is True, the weight is computed as the inverse of
variance.
unit : `astropy.units.Unit`
type of the wavelength coordinates. If None, inputs are in pixels.
plot : bool
            If True, the resulting fit is plotted.
plot_factor : double
oversampling factor for the overplotted fit
Returns
-------
out : `mpdaf.obj.Gauss1D`, `mpdaf.obj.Gauss1D`
Left and right Gaussian functions.
"""
if np.isscalar(lmin):
fmin = None
else:
lmin = np.array(lmin, dtype=float)
fmin = self.mean(lmin[0], lmin[1], weight=False, unit=unit)[0]
lmin = lmin[1]
if np.isscalar(lmax):
fmax = None
else:
lmax = np.array(lmax, dtype=float)
fmax = self.mean(lmax[0], lmax[1], weight=False, unit=unit)[0]
lmax = lmax[0]
spec = self.subspec(lmin, lmax, unit=unit)
data = spec._interp_data(spline)
if unit is None:
            l = np.arange(spec.shape[0], dtype=float)
else:
l = spec.wave.coord(unit=unit)
lmin = l[0]
lmax = l[-1]
if fmin is None:
fmin = data[0]
if fmax is None:
fmax = data[-1]
# initial gaussian peak position
if lpeak is None:
lpeak = l[data.argmax()]
# continuum value
if cont is None:
cont0 = ((fmax - fmin) * lpeak + lmax * fmin -
lmin * fmax) / (lmax - lmin)
else:
cont0 = cont
# initial sigma value
if fwhm is None:
try:
fwhm = spec.fwhm(lpeak, cont0, spline, unit=unit)
except Exception:
lpeak = l[data.argmin()]
fwhm = spec.fwhm(lpeak, cont0, spline, unit=unit)
sigma = fwhm * gaussian_fwhm_to_sigma
# initial gaussian integrated flux
if flux is None:
if unit is None:
pixel = int(lpeak + 0.5)
else:
pixel = spec.wave.pixel(lpeak, nearest=True, unit=unit)
peak = data[pixel] - cont0
flux = peak * np.sqrt(2 * np.pi * (sigma ** 2))
elif peak is True:
peak = flux - cont0
flux = peak * np.sqrt(2 * np.pi * (sigma ** 2))
else:
pass
        # Asymmetric gaussian function (p[0]: flux of the right-hand side if it
# was full... ; p[1]: lambda peak; p[2]:sigma_right; p[3]: sigma_left)
asymfit = lambda p, x: np.where(
x > p[1],
cont0 + p[0] / np.sqrt(2 * np.pi) / p[2] *
np.exp(-(x - p[1]) ** 2 / (2. * p[2] ** 2)),
cont0 + p[0] * p[3] / p[2] / np.sqrt(2 * np.pi) / p[3] *
            np.exp(-(x - p[1]) ** 2 / (2. * p[3] ** 2)))
# -*- coding: utf-8 -*-
__all__ = ["optimize"]
import os
import sys
import numpy as np
import pymc3 as pm
import theano
from pymc3.blocking import ArrayOrdering, DictToArrayBijection
from pymc3.model import Point
from pymc3.theanof import inputvars
from pymc3.util import (
get_default_varnames,
get_untransformed_name,
is_transformed_name,
update_start_vals,
)
from .utils import (
get_args_for_theano_function,
get_theano_function_for_var,
logger,
)
def start_optimizer(vars, verbose=True, progress_bar=True, **kwargs):
if verbose:
names = [
get_untransformed_name(v.name)
if is_transformed_name(v.name)
else v.name
for v in vars
]
sys.stderr.write(
"optimizing logp for variables: [{0}]\n".format(", ".join(names))
)
if progress_bar is True:
if "EXOPLANET_NO_AUTO_PBAR" in os.environ:
from tqdm import tqdm
else:
from tqdm.auto import tqdm
progress_bar = tqdm(**kwargs)
# Check whether the input progress bar has the expected methods
has_progress_bar = (
hasattr(progress_bar, "set_postfix")
and hasattr(progress_bar, "update")
and hasattr(progress_bar, "close")
)
return has_progress_bar, progress_bar
def get_point(wrapper, x):
vars = get_default_varnames(wrapper.model.unobserved_RVs, True)
return {
var.name: value
for var, value in zip(
vars, wrapper.model.fastfn(vars)(wrapper.bij.rmap(x))
)
}
def optimize(
start=None,
vars=None,
model=None,
return_info=False,
verbose=True,
progress_bar=True,
**kwargs
):
"""Maximize the log prob of a PyMC3 model using scipy
All extra arguments are passed directly to the ``scipy.optimize.minimize``
function.
Args:
start: The PyMC3 coordinate dictionary of the starting position
vars: The variables to optimize
model: The PyMC3 model
return_info: Return both the coordinate dictionary and the result of
``scipy.optimize.minimize``
verbose: Print the success flag and log probability to the screen
progress_bar: A ``tqdm`` progress bar instance. Set to ``True``
(default) to use ``tqdm.auto.tqdm()``. Set to ``False`` to disable.
"""
from scipy.optimize import minimize
wrapper = ModelWrapper(start=start, vars=vars, model=model)
has_progress_bar, progress_bar = start_optimizer(
wrapper.vars, verbose=verbose, progress_bar=progress_bar
)
# This returns the objective function and its derivatives
def objective(vec):
nll, grad = wrapper(vec)
if verbose and has_progress_bar:
progress_bar.set_postfix(logp="{0:e}".format(-nll))
progress_bar.update()
return nll, grad
# Optimize using scipy.optimize
x0 = wrapper.bij.map(wrapper.start)
initial = objective(x0)[0]
kwargs["jac"] = True
info = minimize(objective, x0, **kwargs)
# Only accept the output if it is better than it was
x = info.x if (np.isfinite(info.fun) and info.fun < initial) else x0
# Coerce the output into the right format
point = get_point(wrapper, x)
if verbose:
if has_progress_bar:
progress_bar.close()
sys.stderr.write("message: {0}\n".format(info.message))
sys.stderr.write("logp: {0} -> {1}\n".format(-initial, -info.fun))
if not np.isfinite(info.fun):
logger.warning("final logp not finite, returning initial point")
logger.warning(
"this suggests that something is wrong with the model"
)
logger.debug("{0}".format(info))
if return_info:
return point, info
return point
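# A minimal usage sketch (hypothetical model; variable names are illustrative
# only, not taken from the original source):
#
#     with pm.Model() as model:
#         x = pm.Normal("x", mu=0.0, sigma=1.0)
#         pm.Normal("obs", mu=x, sigma=1.0, observed=np.random.randn(10))
#         map_soln = optimize(vars=[x])
#
# where map_soln is a dict mapping variable names to their optimized values.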
def optimize_iterator(
stepper, maxiter=1000, start=None, vars=None, model=None, **kwargs
):
"""Maximize the log prob of a PyMC3 model using scipy
All extra arguments are passed directly to the ``scipy.optimize.minimize``
function.
Args:
stepper: An optimizer object
maxiter: The maximum number of steps to run
start: The PyMC3 coordinate dictionary of the starting position
vars: The variables to optimize
model: The PyMC3 model
return_info: Return both the coordinate dictionary and the result of
``scipy.optimize.minimize``
verbose: Print the success flag and log probability to the screen
progress_bar: A ``tqdm`` progress bar instance. Set to ``True``
(default) to use ``tqdm.auto.tqdm()``. Set to ``False`` to disable.
"""
wrapper = ModelWrapper(start=start, vars=vars, model=model)
x = wrapper.bij.map(wrapper.start)
n = 0
stepper.reset()
while True:
x, nll = stepper.step(wrapper, x)
yield nll, get_point(wrapper, x)
n += 1
if maxiter is not None and n >= maxiter:
break
def allinmodel(vars, model):
notin = [v for v in vars if v not in model.vars]
if notin:
raise ValueError("Some variables not in the model: " + str(notin))
class ModelWrapper:
def __init__(self, start=None, vars=None, model=None):
model = self.model = pm.modelcontext(model)
# Work out the full starting coordinates
if start is None:
start = model.test_point
else:
update_start_vals(start, model.test_point, model)
self.start = start
# Fit all the parameters by default
if vars is None:
vars = model.cont_vars
vars = self.vars = inputvars(vars)
allinmodel(vars, model)
# Work out the relevant bijection map
start = Point(start, model=model)
self.bij = DictToArrayBijection(ArrayOrdering(vars), start)
# Pre-compile the theano model and gradient
nlp = -model.logpt
grad = theano.grad(nlp, vars, disconnected_inputs="ignore")
self.func = get_theano_function_for_var([nlp] + grad, model=model)
def __call__(self, vec):
try:
res = self.func(
*get_args_for_theano_function(
self.bij.rmap(vec), model=self.model
)
)
except Exception:
import traceback
print("array:", vec)
print("point:", self.bij.rmap(vec))
traceback.print_exc()
raise
d = dict(zip((v.name for v in self.vars), res[1:]))
g = self.bij.map(d)
return res[0], g
class Adam:
"""https://github.com/pytorch/pytorch/blob/master/torch/optim/adam.py"""
def __init__(
self,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0])
)
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1])
)
if not 0.0 <= weight_decay:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay)
)
self.lr = lr
self.betas = betas
self.eps = eps
self.weight_decay = weight_decay
self.amsgrad = amsgrad
self.reset()
def reset(self):
self.state = {"step": 0}
def step(self, loss_and_grad_func, p):
loss, grad = loss_and_grad_func(p)
state = self.state
if state["step"] == 0:
# Exponential moving average of gradient values
state["exp_avg"] = np.zeros_like(p)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = np.zeros_like(p)
if self.amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = np.zeros_like(p)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if self.amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = self.betas
state["step"] += 1
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
if self.weight_decay != 0:
grad[:] += self.weight_decay * p
# Decay the first and second moment running average coefficient
exp_avg[:] *= beta1
exp_avg[:] += (1 - beta1) * grad
exp_avg_sq[:] *= beta2
exp_avg_sq[:] += (1 - beta2) * grad ** 2
if self.amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
max_exp_avg_sq[:] = np.maximum(max_exp_avg_sq, exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (
                np.sqrt(max_exp_avg_sq) / np.sqrt(bias_correction2)
            ) + self.eps
# random_walk.py
# -------------------------------------------------------------------------
# Monte Carlo simulation of a two-dimensional random walk.
# -------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import random as rng
num_steps = 500
x_step = rng(num_steps) > 0.5
y_step = rng(num_steps) > 0.5
x_step = 2*x_step - 1
y_step = 2*y_step - 1
x_position = np.cumsum(x_step)
y_position = np.cumsum(y_step)
plt.figure()
plt.plot(x_position, y_position)
plt.axis('equal')
# A more succinct alternative:
x = ( 2*(rng(num_steps) > 0.5) - 1 ).cumsum()
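# A quick sanity check on the walk above (a sketch added here, not part of the
# original script): for num_steps unit steps in each of x and y, the expected
# RMS net displacement is sqrt(2*num_steps), about 31.6 for 500 steps.
print("net displacement:", np.hypot(x_position[-1], y_position[-1]))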
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing layers."""
import functools
from absl.testing import parameterized
import keras
from keras.engine import sequential
from keras.layers.preprocessing import image_preprocessing
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import stateless_random_ops
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class ResizingTest(test_combinations.TestCase):
def _run_test(self, kwargs, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs.update({'height': expected_height, 'width': expected_width})
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.Resizing,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(('down_sample_bilinear_2_by_2', {
'interpolation': 'bilinear'
}, 2, 2), ('down_sample_bilinear_3_by_2', {
'interpolation': 'bilinear'
}, 3, 2), ('down_sample_nearest_2_by_2', {
'interpolation': 'nearest'
}, 2, 2), ('down_sample_nearest_3_by_2', {
'interpolation': 'nearest'
}, 3, 2), ('down_sample_area_2_by_2', {
'interpolation': 'area'
}, 2, 2), ('down_sample_area_3_by_2', {
'interpolation': 'area'
}, 3, 2), ('down_sample_crop_to_aspect_ratio_3_by_2', {
'interpolation': 'bilinear',
'crop_to_aspect_ratio': True,
}, 3, 2))
def test_down_sampling(self, kwargs, expected_height, expected_width):
self._run_test(kwargs, expected_height, expected_width)
@parameterized.named_parameters(('up_sample_bilinear_10_by_12', {
'interpolation': 'bilinear'
}, 10, 12), ('up_sample_bilinear_12_by_12', {
'interpolation': 'bilinear'
}, 12, 12), ('up_sample_nearest_10_by_12', {
'interpolation': 'nearest'
}, 10, 12), ('up_sample_nearest_12_by_12', {
'interpolation': 'nearest'
}, 12, 12), ('up_sample_area_10_by_12', {
'interpolation': 'area'
}, 10, 12), ('up_sample_area_12_by_12', {
'interpolation': 'area'
}, 12, 12), ('up_sample_crop_to_aspect_ratio_12_by_14', {
'interpolation': 'bilinear',
'crop_to_aspect_ratio': True,
}, 12, 14))
def test_up_sampling(self, kwargs, expected_height, expected_width):
self._run_test(kwargs, expected_height, expected_width)
def test_down_sampling_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(dtype)
layer = image_preprocessing.Resizing(
height=2, width=2, interpolation='nearest')
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([
[5, 7],
[13, 15]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_up_sampling_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(dtype)
layer = image_preprocessing.Resizing(
height=4, width=4, interpolation='nearest')
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([
[0, 0, 1, 1],
[0, 0, 1, 1],
[2, 2, 3, 3],
[2, 2, 3, 3]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 4, 4, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(('reshape_bilinear_10_by_4', {
'interpolation': 'bilinear'
}, 10, 4))
def test_reshaping(self, kwargs, expected_height, expected_width):
self._run_test(kwargs, expected_height, expected_width)
def test_invalid_interpolation(self):
with self.assertRaises(NotImplementedError):
image_preprocessing.Resizing(5, 5, 'invalid_interpolation')
def test_config_with_custom_name(self):
layer = image_preprocessing.Resizing(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.Resizing.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_crop_to_aspect_ratio(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype('float32')
layer = image_preprocessing.Resizing(4, 2, crop_to_aspect_ratio=True)
output_image = layer(input_image)
expected_output = np.asarray([
[1, 2],
[5, 6],
[9, 10],
[13, 14],
]).astype('float32')
expected_output = np.reshape(expected_output, (1, 4, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype('float32')
layer = image_preprocessing.Resizing(2, 2, interpolation='nearest')
output_image = layer(input_image)
expected_output = np.asarray([
[5, 7],
[13, 15],
]).astype('float32')
expected_output = np.reshape(expected_output, (2, 2, 1))
self.assertAllEqual(expected_output, output_image)
@parameterized.named_parameters(('crop_to_aspect_ratio_false', False),
('crop_to_aspect_ratio_true', True))
def test_ragged_image(self, crop_to_aspect_ratio):
with test_utils.use_gpu():
inputs = tf.ragged.constant([
np.ones((8, 8, 1)),
np.ones((8, 4, 1)),
np.ones((4, 8, 1)),
np.ones((2, 2, 1)),
], dtype='float32')
layer = image_preprocessing.Resizing(
2,
2,
interpolation='nearest',
crop_to_aspect_ratio=crop_to_aspect_ratio)
outputs = layer(inputs)
expected_output = [[[[1.], [1.]], [[1.], [1.]]],
[[[1.], [1.]], [[1.], [1.]]],
[[[1.], [1.]], [[1.], [1.]]],
[[[1.], [1.]], [[1.], [1.]]]]
self.assertIsInstance(outputs, tf.Tensor)
self.assertNotIsInstance(outputs, tf.RaggedTensor)
self.assertAllEqual(expected_output, outputs)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.Resizing(2, 2)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.Resizing(2, 2, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@parameterized.named_parameters(
('batch_crop_to_aspect_ratio', True, True),
('batch_dont_crop_to_aspect_ratio', False, True),
('single_sample_crop_to_aspect_ratio', True, False),
('single_sample_dont_crop_to_aspect_ratio', False, False),
)
def test_static_shape_inference(self, crop_to_aspect_ratio, batch):
channels = 3
input_height = 8
input_width = 8
target_height = 4
target_width = 6
layer = image_preprocessing.Resizing(
target_height, target_width, crop_to_aspect_ratio=crop_to_aspect_ratio)
unit_test = self
@tf.function
def tf_function(img):
unit_test.assertListEqual([input_height, input_width, channels],
img.shape.as_list()[-3:])
img = layer(img)
unit_test.assertListEqual([target_height, target_width, channels],
img.shape.as_list()[-3:])
return img
with test_utils.use_gpu():
if batch:
input_shape = (2, input_height, input_width, channels)
else:
input_shape = (input_height, input_width, channels)
img_data = np.random.random(size=input_shape).astype('float32')
tf_function(img_data)
def get_numpy_center_crop(images, expected_height, expected_width):
orig_height = images.shape[1]
orig_width = images.shape[2]
height_start = int((orig_height - expected_height) / 2)
width_start = int((orig_width - expected_width) / 2)
height_end = height_start + expected_height
width_end = width_start + expected_width
return images[:, height_start:height_end, width_start:width_end, :]
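# Worked example (illustrative only): for images of shape (N, 5, 8, C) cropped
# to 3x4, height_start = int((5 - 3) / 2) = 1 and width_start = int((8 - 4) / 2) = 2,
# so the helper returns images[:, 1:4, 2:6, :].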
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class CenterCropTest(test_combinations.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height': expected_height, 'width': expected_width}
input_images = np.random.random(
(num_samples, orig_height, orig_width, channels)).astype(np.float32)
expected_output = get_numpy_center_crop(input_images, expected_height,
expected_width)
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.CenterCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
input_data=input_images,
expected_output=expected_output,
expected_output_shape=(None, expected_height, expected_width,
channels))
@parameterized.named_parameters(('center_crop_3_by_4', 3, 4),
('center_crop_3_by_2', 3, 2))
def test_center_crop_aligned(self, expected_height, expected_width):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(('center_crop_4_by_5', 4, 5),
('center_crop_4_by_3', 4, 3))
def test_center_crop_mis_aligned(self, expected_height, expected_width):
self._run_test(expected_height, expected_width)
@parameterized.named_parameters(('center_crop_4_by_6', 4, 6),
('center_crop_3_by_2', 3, 2))
def test_center_crop_half_mis_aligned(self, expected_height, expected_width):
self._run_test(expected_height, expected_width)
def test_input_smaller_than_crop_box(self):
np.random.seed(1337)
height, width = 10, 8
inp = np.random.random((12, 3, 3, 3))
with test_utils.use_gpu():
layer = image_preprocessing.CenterCrop(height, width)
actual_output = layer(inp)
      # In this case, the output should equal resizing with crop_to_aspect_ratio=True.
resize_layer = image_preprocessing.Resizing(
height, width, crop_to_aspect_ratio=True)
expected_output = resize_layer(inp)
self.assertAllEqual(expected_output, actual_output)
def test_config_with_custom_name(self):
layer = image_preprocessing.CenterCrop(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.CenterCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (4, 4, 1)).astype('float32')
layer = image_preprocessing.CenterCrop(2, 2)
output_image = layer(input_image)
expected_output = np.asarray([
[5, 6],
[9, 10],
]).astype('float32')
expected_output = np.reshape(expected_output, (2, 2, 1))
self.assertAllEqual(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.CenterCrop(2, 2)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.CenterCrop(2, 2, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomCropTest(test_combinations.TestCase):
def _run_test(self, expected_height, expected_width):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height': expected_height, 'width': expected_width}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomCrop,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, expected_height, expected_width,
channels))
def test_input_smaller_than_crop_box(self):
np.random.seed(1337)
height, width = 10, 8
inp = np.random.random((12, 3, 3, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp)
      # In this case, the output should equal resizing with crop_to_aspect_ratio=True.
resize_layer = image_preprocessing.Resizing(
height, width, crop_to_aspect_ratio=True)
expected_output = resize_layer(inp)
self.assertAllEqual(expected_output, actual_output)
def test_training_with_mock(self):
np.random.seed(1337)
height, width = 3, 4
height_offset = np.random.randint(low=0, high=3)
width_offset = np.random.randint(low=0, high=5)
mock_offset = [height_offset, width_offset]
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator, 'random_uniform', return_value=mock_offset):
inp = np.random.random((12, 5, 8, 3))
actual_output = layer(inp, training=True)
expected_output = inp[:, height_offset:(height_offset + height),
width_offset:(width_offset + width), :]
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(('random_crop_4_by_6', 4, 6),
('random_crop_3_by_2', 3, 2))
def test_random_crop_output_shape(self, expected_height, expected_width):
self._run_test(expected_height, expected_width)
def test_random_crop_full_height(self):
self._run_test(5, 2)
def test_random_crop_full_width(self):
self._run_test(3, 8)
def test_random_crop_full(self):
np.random.seed(1337)
height, width = 8, 16
inp = np.random.random((12, 8, 16, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=False)
self.assertAllClose(inp, actual_output)
def test_predicting_with_mock_longer_height(self):
np.random.seed(1337)
height, width = 3, 3
inp = np.random.random((12, 10, 6, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=False)
resized_inp = tf.image.resize(inp, size=[5, 3])
expected_output = resized_inp[:, 1:4, :, :]
self.assertAllClose(expected_output, actual_output)
def test_predicting_with_mock_longer_width(self):
np.random.seed(1337)
height, width = 4, 6
inp = np.random.random((12, 8, 16, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(height, width)
actual_output = layer(inp, training=False)
resized_inp = tf.image.resize(inp, size=[4, 8])
expected_output = resized_inp[:, :, 1:7, :]
self.assertAllClose(expected_output, actual_output)
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomCrop(5, 5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomCrop.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
np.random.seed(1337)
inp = np.random.random((16, 16, 3))
mock_offset = [2, 2]
with test_utils.use_gpu():
layer = image_preprocessing.RandomCrop(8, 8)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator,
'random_uniform',
return_value=mock_offset):
actual_output = layer(inp, training=True)
self.assertAllClose(inp[2:10, 2:10, :], actual_output)
@test_utils.run_v2_only
def test_uint8_input(self):
inputs = keras.Input((128, 128, 3), batch_size=2, dtype=tf.uint8)
layer = image_preprocessing.RandomCrop(64, 64)
self.assertAllEqual(layer(inputs).dtype, 'float32')
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomCrop(2, 2)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomCrop(2, 2, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
class RescalingTest(test_combinations.TestCase):
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_rescaling_base(self):
kwargs = {'scale': 1. / 127.5, 'offset': -1.}
test_utils.layer_test(
image_preprocessing.Rescaling,
kwargs=kwargs,
input_shape=(2, 5, 6, 3),
expected_output_shape=(None, 5, 6, 3))
@test_utils.run_v2_only
def test_rescaling_correctness_float(self):
layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1.)
inputs = tf.random.uniform((2, 4, 5, 3))
outputs = layer(inputs)
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)
@test_utils.run_v2_only
def test_rescaling_correctness_int(self):
layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1)
inputs = tf.random.uniform((2, 4, 5, 3), 0, 100, dtype='int32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype.name, 'float32')
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)
def test_config_with_custom_name(self):
layer = image_preprocessing.Rescaling(0.5, name='rescaling')
config = layer.get_config()
layer_1 = image_preprocessing.Rescaling.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
def test_unbatched_image(self):
layer = image_preprocessing.Rescaling(scale=1. / 127.5, offset=-1)
inputs = tf.random.uniform((4, 5, 3))
outputs = layer(inputs)
self.assertAllClose(outputs.numpy(), inputs.numpy() * (1. / 127.5) - 1)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.Rescaling(0.5)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.Rescaling(0.5, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomFlipTest(test_combinations.TestCase):
def _run_test(self, mode, expected_output=None, mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = [0.0 for _ in range(num_samples)]
if mode == 'horizontal_and_vertical':
mock_random *= 2
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
expected_output = inp
if mode == 'horizontal' or mode == 'horizontal_and_vertical':
expected_output = np.flip(expected_output, axis=2)
if mode == 'vertical' or mode == 'horizontal_and_vertical':
expected_output = np.flip(expected_output, axis=1)
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
'stateless_random_uniform',
side_effect=mock_random,
):
with test_utils.use_gpu():
layer = image_preprocessing.RandomFlip(mode)
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(
('random_flip_horizontal', 'horizontal'),
('random_flip_vertical', 'vertical'),
('random_flip_both', 'horizontal_and_vertical'))
def test_random_flip(self, mode):
self._run_test(mode)
def test_random_flip_horizontal_half(self):
np.random.seed(1337)
mock_random = [0.0, 1.0]
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=1)
self._run_test('horizontal', expected_output, mock_random)
def test_random_flip_vertical_half(self):
np.random.seed(1337)
mock_random = [0.0, 1.0]
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images.copy()
expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0)
self._run_test('vertical', expected_output, mock_random)
def test_random_flip_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomFlip()
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_random_flip_default(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = np.flip(np.flip(input_images, axis=1), axis=2)
mock_random = [0.0, 0.0, 0.0, 0.0]
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
'stateless_random_uniform',
side_effect=mock_random,
):
with self.cached_session():
layer = image_preprocessing.RandomFlip()
actual_output = layer(input_images, training=True)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomFlip(name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomFlip.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_random_flip_unbatched_image(self):
input_image = np.random.random((4, 4, 1)).astype(np.float32)
expected_output = np.flip(input_image, axis=0)
# mock_random = np.reshape([0.], [1, 1, 1])
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
'stateless_random_uniform',
return_value=0.,
):
with self.cached_session():
layer = image_preprocessing.RandomFlip('vertical')
actual_output = layer(input_image, training=True)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomFlip()
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomFlip(dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomContrastTest(test_combinations.TestCase):
def _run_test(self, lower, upper, expected_output=None, mock_random=None):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
if mock_random is None:
mock_random = 0.2
inp = np.random.random((num_samples, orig_height, orig_width, channels))
if expected_output is None:
# reduce mean on height.
inp_mean = np.mean(inp, axis=1, keepdims=True)
# reduce mean on width.
inp_mean = np.mean(inp_mean, axis=2, keepdims=True)
expected_output = (inp - inp_mean) * mock_random + inp_mean
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
'stateless_random_uniform',
return_value=mock_random,
):
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((lower, upper))
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@parameterized.named_parameters(('random_contrast_2_by_5', 0.2, 0.5),
('random_contrast_2_by_13', 0.2, 1.3),
('random_contrast_5_by_2', 0.5, 0.2),
('random_contrast_10_by_10', 1.0, 1.0))
def test_random_contrast(self, lower, upper):
self._run_test(lower, upper)
@parameterized.named_parameters(('random_contrast_amplitude_2', 0.2),
('random_contrast_amplitude_5', 0.5))
def test_random_contrast_amplitude(self, amplitude):
input_images = np.random.random((2, 5, 8, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast(amplitude)
layer(input_images)
def test_random_contrast_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_random_contrast_int_dtype(self):
input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3))
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((0.1, 0.2))
layer(input_images)
def test_random_contrast_invalid_bounds(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((-0.1, .5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((1.1, .5))
with self.assertRaises(ValueError):
image_preprocessing.RandomContrast((0.1, -0.2))
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomContrast((.5, .6), name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomContrast.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_value_clip(self):
input_images = np.random.random((5, 8, 3)).astype(np.float32) * 255.0
# Give a factor range [1.0, 11.0] so that it will produce large contrast.
layer = image_preprocessing.RandomContrast((0.0, 10.0))
output = layer(input_images)
self.assertLessEqual(tf.reduce_max(output), 255.0)
self.assertGreaterEqual(tf.reduce_min(output), 0.0)
def test_unbatched_image(self):
np.random.seed(1337)
mock_random = 0.2
inp = np.random.random((4, 4, 1))
inp_mean = np.mean(inp, axis=0, keepdims=True)
inp_mean = np.mean(inp_mean, axis=1, keepdims=True)
expected_output = (inp - inp_mean) * mock_random + inp_mean
with tf.compat.v1.test.mock.patch.object(
stateless_random_ops,
'stateless_random_uniform',
return_value=mock_random,
):
with test_utils.use_gpu():
layer = image_preprocessing.RandomContrast((0.2, 0.5))
actual_output = layer(inp, training=True)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomContrast((.5, .6))
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomContrast((.5, .6), dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomBrightnessTest(test_combinations.TestCase):
def test_factor_input_validation(self):
with self.assertRaisesRegex(ValueError, r'in the range \[-1.0, 1.0\]'):
image_preprocessing.RandomBrightness(2.0)
with self.assertRaisesRegex(ValueError, 'list of two numbers'):
image_preprocessing.RandomBrightness([1.0])
with self.assertRaisesRegex(ValueError, 'should be a number'):
image_preprocessing.RandomBrightness('one')
def test_factor_normalize(self):
layer = image_preprocessing.RandomBrightness(1.0)
self.assertEqual(layer._factor, [-1.0, 1.0])
layer = image_preprocessing.RandomBrightness((0.5, 0.3))
self.assertEqual(layer._factor, [0.3, 0.5])
layer = image_preprocessing.RandomBrightness(-0.2)
self.assertEqual(layer._factor, [-0.2, 0.2])
@test_utils.run_v2_only
def test_output_value_range(self):
# Always scale up to 255
layer = image_preprocessing.RandomBrightness([1.0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
output_min = tf.math.reduce_min(output)
output_max = tf.math.reduce_max(output)
self.assertEqual(output_min, 255)
self.assertEqual(output_max, 255)
# Always scale down to 0
layer = image_preprocessing.RandomBrightness([-1.0, -1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
output_min = tf.math.reduce_min(output)
output_max = tf.math.reduce_max(output)
self.assertEqual(output_min, 0)
self.assertEqual(output_max, 0)
def test_output(self):
# Always scale up, but randomly between 0 ~ 255
layer = image_preprocessing.RandomBrightness([0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
diff = output - inputs
self.assertGreaterEqual(tf.math.reduce_min(diff), 0)
self.assertGreater(tf.math.reduce_mean(diff), 0)
# Always scale down, but randomly between 0 ~ 255
layer = image_preprocessing.RandomBrightness([-1.0, 0.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
diff = output - inputs
self.assertLessEqual(tf.math.reduce_max(diff), 0)
self.assertLess(tf.math.reduce_mean(diff), 0)
@test_utils.run_v2_only
def test_scale_output(self):
layer = image_preprocessing.RandomBrightness([0, 1.0], seed=1337)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
# Create a new layer with same seed but different value range
layer2 = image_preprocessing.RandomBrightness(
[0, 1.0], value_range=[0, 1], seed=1337)
inputs2 = inputs / 255.0
output2 = layer2(inputs2)
# Make sure the outputs are the same, but just scaled with 255
self.assertAllClose(output, output2 * 255.0)
def test_different_adjustment_within_batch(self):
layer = image_preprocessing.RandomBrightness([0.2, 0.3])
inputs = np.zeros(shape=(2, 10, 10, 3)) # 2 images with all zeros
output = layer(inputs)
diff = output - inputs
    # Make sure the two images get different adjustments
self.assertNotAllClose(diff[0], diff[1])
    # Make sure all pixels within the same image get the same adjustment
image1 = output[0]
    # The mean pixel value over height and width should equal any individual
    # pixel in the image.
self.assertAllClose(
tf.reduce_mean(image1), image1[0, 0, 0], rtol=1e-5, atol=1e-5)
def test_inference(self):
layer = image_preprocessing.RandomBrightness([0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs, training=False)
self.assertAllClose(inputs, output)
@test_utils.run_v2_only
def test_dtype(self):
layer = image_preprocessing.RandomBrightness([0, 1.0])
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output = layer(inputs)
self.assertEqual(output.dtype, tf.float32)
layer = image_preprocessing.RandomBrightness([0, 1.0], dtype='uint8')
output = layer(inputs)
self.assertEqual(output.dtype, tf.uint8)
def test_seed(self):
layer = image_preprocessing.RandomBrightness([0, 1.0], seed=1337)
inputs = np.random.randint(0, 255, size=(224, 224, 3))
output_1 = layer(inputs)
layer2 = image_preprocessing.RandomBrightness([0, 1.0], seed=1337)
output_2 = layer2(inputs)
self.assertAllClose(output_1, output_2)
def test_config(self):
layer = image_preprocessing.RandomBrightness(
[0, 1.0], value_range=[0.0, 1.0], seed=1337)
config = layer.get_config()
self.assertEqual(config['factor'], [0.0, 1.0])
self.assertEqual(config['value_range'], [0.0, 1.0])
self.assertEqual(config['seed'], 1337)
reconstructed_layer = image_preprocessing.RandomBrightness.from_config(
config)
self.assertEqual(reconstructed_layer._factor, layer._factor)
self.assertEqual(reconstructed_layer._value_range, layer._value_range)
self.assertEqual(reconstructed_layer._seed, layer._seed)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomTranslationTest(test_combinations.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomTranslation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_translate_4_by_6', .4, .6), ('random_translate_3_by_2', .3, .2),
('random_translate_tuple_factor', (-.5, .4), (.2, .3)))
def test_random_translation(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_translation_up_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0.)
output_image = layer(input_image)
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_up_numeric_constant(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0., fill_mode='constant')
output_image = layer(input_image)
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[0, 0, 0, 0, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(.2, .2), width_factor=0.)
output_image = layer(input_image)
expected_output = np.asarray([
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_asymmetric_size_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 16), (1, 8, 2, 1)).astype(dtype)
        # Shifting by .5 * 8 = 4 pixels.
layer = image_preprocessing.RandomTranslation(
height_factor=(.5, .5), width_factor=0.)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([
[6, 7],
[4, 5],
[2, 3],
[0, 1],
[0, 1],
[2, 3],
[4, 5],
[6, 7],
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 8, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_constant(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
        # Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(.2, .2), width_factor=0., fill_mode='constant')
output_image = layer(input_image)
expected_output = np.asarray([
[0, 0, 0, 0, 0],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=0., width_factor=(-.2, -.2))
output_image = layer(input_image)
expected_output = np.asarray([
[1, 2, 3, 4, 4],
[6, 7, 8, 9, 9],
[11, 12, 13, 14, 14],
[16, 17, 18, 19, 19],
[21, 22, 23, 24, 24],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_left_numeric_constant(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=0., width_factor=(-.2, -.2), fill_mode='constant')
output_image = layer(input_image)
expected_output = np.asarray([
[1, 2, 3, 4, 0],
[6, 7, 8, 9, 0],
[11, 12, 13, 14, 0],
[16, 17, 18, 19, 0],
[21, 22, 23, 24, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomTranslation(.5, .5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomTranslation(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomTranslation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0.)
output_image = layer(input_image)
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24],
]).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomTranslation(.5, .6)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomTranslation(.5, .6, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomTransformTest(test_combinations.TestCase):
def _run_random_transform_with_mock(self,
transform_matrix,
expected_output,
mode,
fill_value=0.0,
interpolation='bilinear'):
inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32)
with self.cached_session():
output = image_preprocessing.transform(
inp,
transform_matrix,
fill_mode=mode,
fill_value=fill_value,
interpolation=interpolation)
self.assertAllClose(expected_output, output)
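  # Note on the transform matrices used below: each row [a0, a1, a2, b0, b1, b2,
  # c0, c1] follows the projective-transform convention used by
  # `image_preprocessing.transform` (TF's image projective transform op): an
  # output point (x, y) samples the input at
  #   x' = (a0*x + a1*y + a2) / k,  y' = (b0*x + b1*y + b2) / k,  k = c0*x + c1*y + 1.
  # So a2 = 1 samples one pixel to the right (the image content shifts left) and
  # b2 = -1 samples one pixel up (the image content shifts down).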
def test_random_translation_reflect(self):
# reflected output is (dcba|abcd|dcba)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 1., 2.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[12., 13., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
# Test left shift by 1.
# reflected output is (dcba|abcd|dcba)
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 2.],
[4., 5., 5.],
[7., 8., 8.],
[10., 11., 11.],
[13., 14., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[3., 3., 4],
[6., 6., 7.],
[9., 9., 10.],
[12., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'reflect')
def test_random_translation_wrap(self):
    # wrapped output is (abcd|abcd|abcd)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[12., 13., 14.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[0., 1., 2.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 0.],
[4., 5., 3.],
[7., 8., 6.],
[10., 11., 9.],
[13., 14., 12.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[2., 0., 1.],
[5., 3., 4],
[8., 6., 7.],
[11., 9., 10.],
[14., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'wrap')
def test_random_translation_nearest(self):
# nearest output is (aaaa|abcd|dddd)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 1., 2.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[12., 13., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 2.],
[4., 5., 5.],
[7., 8., 8.],
[10., 11., 11.],
[13., 14., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[3., 3., 4],
[6., 6., 7.],
[9., 9., 10.],
[12., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'nearest')
def test_random_translation_constant_0(self):
# constant output is (0000|abcd|0000)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 0.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 0.],
[4., 5., 0.],
[7., 8., 0.],
[10., 11., 0.],
[13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[0., 3., 4],
[0., 6., 7.],
[0., 9., 10.],
[0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(transform_matrix, expected_output,
'constant')
def test_random_translation_constant_1(self):
with tf.compat.forward_compatibility_horizon(2020, 8, 6):
# constant output is (1111|abcd|1111)
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 1., 1.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[1., 1., 1.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 1.],
[4., 5., 1.],
[7., 8., 1.],
[10., 11., 1.],
[13., 14., 1.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 0., 1.],
[1., 3., 4],
[1., 6., 7.],
[1., 9., 10.],
[1., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix, expected_output, 'constant', fill_value=1.0)
def test_random_translation_nearest_interpolation(self):
    # constant fill (zeros) with nearest-neighbor interpolation.
# Test down shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 0.],
[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8],
[9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
# Test up shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[3., 4., 5.],
[6., 7., 8],
[9., 10., 11.],
[12., 13., 14.],
[0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
# Test left shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[1., 2., 0.],
[4., 5., 0.],
[7., 8., 0.],
[10., 11., 0.],
[13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
# Test right shift by 1.
# pyformat: disable
expected_output = np.asarray(
[[0., 0., 1.],
[0., 3., 4],
[0., 6., 7.],
[0., 9., 10.],
[0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
# pyformat: enable
transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
self._run_random_transform_with_mock(
transform_matrix,
expected_output,
mode='constant',
interpolation='nearest')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomRotationTest(test_combinations.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'factor': factor}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomRotation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(('random_rotate_4', .4),
('random_rotate_3', .3),
('random_rotate_tuple_factor', (-.5, .4)))
def test_random_rotation(self, factor):
self._run_test(factor)
def test_random_rotation_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomRotation(.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
def test_distribution_strategy(self):
"""Tests that RandomRotation can be created within distribution strategies."""
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
with test_utils.use_gpu():
strat = tf.distribute.MirroredStrategy(devices=['cpu', 'gpu'])
with strat.scope():
layer = image_preprocessing.RandomRotation(.5)
output = strat.run(lambda: layer(input_images, training=True))
values = output.values
self.assertAllEqual(2, len(values))
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomRotation(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomRotation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.float32)
# 180 rotation.
layer = image_preprocessing.RandomRotation(factor=(0.5, 0.5))
output_image = layer(input_image)
expected_output = np.asarray([
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]).astype(np.float32)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllClose(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomRotation(.5)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomRotation(.5, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomZoomTest(test_combinations.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with test_utils.use_gpu():
test_utils.layer_test(
image_preprocessing.RandomZoom,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_zoom_4_by_6', -.4, -.6), ('random_zoom_2_by_3', -.2, -.3),
('random_zoom_tuple_factor', (-.4, -.5), (-.2, -.3)))
def test_random_zoom_in(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
@parameterized.named_parameters(
('random_zoom_4_by_6', .4, .6), ('random_zoom_2_by_3', .2, .3),
('random_zoom_tuple_factor', (.4, .5), (.2, .3)))
def test_random_zoom_out(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_zoom_in_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = image_preprocessing.RandomZoom((-.5, -.5), (-.5, -.5),
interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray([
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = image_preprocessing.RandomZoom((.5, .5), (.8, .8),
fill_mode='constant',
interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray([
[0, 0, 0, 0, 0],
[0, 5, 7, 9, 0],
[0, 10, 12, 14, 0],
[0, 20, 22, 24, 0],
[0, 0, 0, 0, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric_preserve_aspect_ratio(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = image_preprocessing.RandomZoom((.5, .5),
fill_mode='constant',
interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray([
[0, 0, 0, 0, 0],
[0, 6, 7, 9, 0],
[0, 11, 12, 14, 0],
[0, 21, 22, 24, 0],
[0, 0, 0, 0, 0],
]).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomZoom(.5, .5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomZoom(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomZoom.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64)
layer = image_preprocessing.RandomZoom((-.5, -.5), (-.5, -.5),
interpolation='nearest')
output_image = layer(input_image)
expected_output = np.asarray([
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomZoom(.5, .5)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomZoom(.5, .5, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomHeightTest(test_combinations.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with test_utils.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomHeight(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[2], 8)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(('random_height_4_by_6', (.4, .6)),
('random_height_3_by_2', (-.3, .2)),
('random_height_3', .3))
def test_random_height_basic(self, factor):
self._run_test(factor)
def test_valid_random_height(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((12, 5, 8, 3))
layer = image_preprocessing.RandomHeight(.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator, 'random_uniform', return_value=mock_factor):
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[1], 3)
def test_random_height_longer_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 6), (2, 3, 1)).astype(dtype)
layer = image_preprocessing.RandomHeight(factor=(1., 1.))
# Return type of RandomHeight() is float32 if `interpolation` is not
        # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast the output to the desired dtype.
output_image = tf.cast(
layer(np.expand_dims(input_image, axis=0)), dtype=dtype)
# pyformat: disable
expected_output = np.asarray([
[0, 1, 2],
[0.75, 1.75, 2.75],
[2.25, 3.25, 4.25],
[3, 4, 5]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 4, 3, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_height_shorter_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 8), (4, 2, 1)).astype(dtype)
layer = image_preprocessing.RandomHeight(
factor=(-.5, -.5), interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
# pyformat: disable
expected_output = np.asarray([
[2, 3],
[6, 7]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_height_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomHeight((-1.5, .4))
def test_random_height_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomHeight(.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomHeight(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomHeight.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((5, 8, 3))
layer = image_preprocessing.RandomHeight(.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator, 'random_uniform', return_value=mock_factor):
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 3)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomHeight(.2)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomHeight(.2, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class RandomWidthTest(test_combinations.TestCase):
def _run_test(self, factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
with test_utils.use_gpu():
img = np.random.random((num_samples, orig_height, orig_width, channels))
layer = image_preprocessing.RandomWidth(factor)
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[0], 2)
self.assertEqual(img_out.shape[1], 5)
self.assertEqual(img_out.shape[3], 3)
@parameterized.named_parameters(('random_width_4_by_6', (.4, .6)),
('random_width_3_by_2', (-.3, .2)),
('random_width_3', .3))
def test_random_width_basic(self, factor):
self._run_test(factor)
def test_valid_random_width(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((12, 8, 5, 3))
layer = image_preprocessing.RandomWidth(.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator, 'random_uniform', return_value=mock_factor):
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[2], 3)
def test_random_width_longer_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 6), (3, 2, 1)).astype(dtype)
layer = image_preprocessing.RandomWidth(factor=(1., 1.))
# Return type of RandomWidth() is float32 if `interpolation` is not
        # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast the output to the desired dtype.
output_image = tf.cast(
layer(np.expand_dims(input_image, axis=0)), dtype=dtype)
# pyformat: disable
expected_output = np.asarray([
[0, 0.25, 0.75, 1],
[2, 2.25, 2.75, 3],
[4, 4.25, 4.75, 5]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 3, 4, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_width_shorter_numeric(self):
for dtype in (np.int64, np.float32):
with test_utils.use_gpu():
input_image = np.reshape(np.arange(0, 8), (2, 4, 1)).astype(dtype)
layer = image_preprocessing.RandomWidth(
factor=(-.5, -.5), interpolation='nearest')
output_image = layer(np.expand_dims(input_image, axis=0))
# pyformat: disable
expected_output = np.asarray([
[1, 3],
[5, 7]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 2, 2, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_width_invalid_factor(self):
with self.assertRaises(ValueError):
image_preprocessing.RandomWidth((-1.5, .4))
def test_random_width_inference(self):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with test_utils.use_gpu():
layer = image_preprocessing.RandomWidth(.5)
actual_output = layer(input_images, training=False)
self.assertAllClose(expected_output, actual_output)
@test_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomWidth(.5, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomWidth.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
# need (maxval - minval) * rnd + minval = 0.6
mock_factor = 0.6
with test_utils.use_gpu():
img = np.random.random((8, 5, 3))
layer = image_preprocessing.RandomWidth(.4)
with tf.compat.v1.test.mock.patch.object(
layer._random_generator, 'random_uniform', return_value=mock_factor):
img_out = layer(img, training=True)
self.assertEqual(img_out.shape[1], 3)
@test_utils.run_v2_only
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype='float64')
layer = image_preprocessing.RandomWidth(.2)
self.assertAllEqual(layer(inputs).dtype, 'float32')
layer = image_preprocessing.RandomWidth(.2, dtype='uint8')
self.assertAllEqual(layer(inputs).dtype, 'uint8')
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class LearningPhaseTest(test_combinations.TestCase):
def test_plain_call(self):
layer = image_preprocessing.RandomWidth(.5, seed=123)
shape = (12, 12, 3)
img = np.random.random((12,) + shape)
out = layer(img) # Default to training=True
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = layer(img, training=True)
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = layer(img, training=False)
self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)
def test_call_in_container(self):
layer1 = image_preprocessing.RandomWidth(.5, seed=123)
layer2 = image_preprocessing.RandomHeight(.5, seed=123)
seq = sequential.Sequential([layer1, layer2])
shape = (12, 12, 3)
img = np.random.random((12,) + shape)
out = seq(img) # Default to training=True
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = seq(img, training=True)
self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
out = seq(img, training=False)
self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class DeterminismTest(test_combinations.TestCase):
@parameterized.named_parameters(
('random_flip', image_preprocessing.RandomFlip),
('random_contrast',
functools.partial(image_preprocessing.RandomContrast, factor=1.)),
('random_crop',
functools.partial(image_preprocessing.RandomCrop, height=2, width=2)),
('random_translation',
functools.partial(image_preprocessing.RandomTranslation, 0.3, 0.2)),
('random_rotation',
functools.partial(image_preprocessing.RandomRotation, 0.5)),
('random_zoom', functools.partial(image_preprocessing.RandomZoom, 0.2)),
('random_height', functools.partial(image_preprocessing.RandomHeight,
0.4)),
('random_width', functools.partial(image_preprocessing.RandomWidth, 0.3)),
)
def test_seed_constructor_arg(self, layer_cls):
input_image = np.random.random((2, 5, 8, 3)).astype(np.float32)
layer1 = layer_cls(seed=0.)
layer2 = layer_cls(seed=0.)
layer1_output = layer1(input_image)
layer2_output = layer2(input_image)
self.assertAllClose(layer1_output.numpy().tolist(),
layer2_output.numpy().tolist())
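# RandomAddLayer below is a minimal custom augmentation layer used by the tests
# that follow: get_random_transformation() draws a per-call offset (or returns
# the configured fixed value), and augment_image() / augment_label() add that
# offset to the image and the label respectively.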
class RandomAddLayer(image_preprocessing.BaseImageAugmentationLayer):
def __init__(self, value_range=(0., 1.0), fixed_value=None, **kwargs):
super().__init__(**kwargs)
self.value_range = value_range
self.fixed_value = fixed_value
  def get_random_transformation(self):
if self.fixed_value:
return self.fixed_value
return self._random_generator.random_uniform(
[], minval=self.value_range[0], maxval=self.value_range[1])
def augment_image(self, image, transformation=None):
return image + transformation
def augment_label(self, label, transformation=None):
return label + transformation
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class BaseImageAugmentationLayerTest(test_combinations.TestCase):
def test_augment_single_image(self):
add_layer = RandomAddLayer(fixed_value=2.0)
image = np.random.random(size=(8, 8, 3)).astype('float32')
output = add_layer(image)
self.assertAllClose(image + 2.0, output)
def test_augment_batch_images(self):
add_layer = RandomAddLayer()
    images = np.random.random(size=(2, 8, 8, 3))
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 11 20:02:37 2017
@author: tzave
"""
import numpy as np
def swap(r, p, E, cols=None):
if cols is None:
b = np.copy(E[r, :])
E[r, :] = E[p, :]
E[p, :] = np.copy(b)
else:
b = np.copy(E[:, r])
E[:, r] = E[:, p]
E[:, p] = np.copy(b)
def ForwardSubstitution(A, b):
rows = A.shape[0]
y = np.zeros(rows)
for i in range(rows):
s = 0.
for j in range(i):
s = s + A[i, j] * y[j]
y[i] = (b[i] - s) / A[i, i]
return y
def BackSubstitution(A, b):
rows = A.shape[0]
x = np.zeros(rows)
for i in reversed(range(rows)):
s = 0
for j in range(i + 1, rows):
s = s + A[i, j] * x[j]
x[i] = (b[i] - s) / A[i, i]
return x
def foundNonZeroPivot(r, E):
rows = E.shape[0]
PivotFound = False
    for p in range(r, rows):  # search all remaining rows, including the last one
if np.isclose(E[p, r], 0): # Keep looking for non-zero pivot
continue
else: # if pivot is found
PivotFound = True
if p > r: # Only swap if p>r
swap(r, p, E)
break
return PivotFound
def partialPivot(r, A):
rows = A.shape[0] # Number of rows
Amax = np.abs(A[r, r])
rmax = r
for p in range(r, rows):
Apr = np.abs(A[p, r])
if Apr > Amax:
Amax = Apr
rmax = p
if rmax != r:
swap(r, rmax, A)
return
def completePivot(r, A):
rows, cols = A.shape
cols = cols - 1 # ignore the last column of the augmented matrix
Amax = np.abs(A[r, r])
rmax = r
cmax = r
for i in range(r, rows):
for j in range(r, cols):
Aij = np.abs(A[i, j])
if Aij > Amax:
Amax = Aij
rmax = i
cmax = j
    if rmax != r:
        swap(r, rmax, A)
    if cmax != r:
        # Column swaps reorder the unknowns; a full solver would need to record
        # them so the solution vector can be unscrambled afterwards.
        swap(r, cmax, A, True)
return
def GaussElimination(A, b, pivot=None):
isSingular = False
rows = A.shape[0]
b = b.reshape(rows, 1)
E = np.append(A, b, axis=1) # Append b as extra column
for r in range(rows - 1):
if pivot == 'partial':
partialPivot(r, E)
elif pivot == 'complete':
completePivot(r, E)
else: # Simple pivot is required to avoid division by 0
isSingular = not foundNonZeroPivot(r, E)
if isSingular:
break
for i in range(r + 1, rows):
if np.isclose(E[i, r], 0): # skip line if pivot is already 0
continue
m = - E[i, r]/E[r, r]
            E[i, r] = 0
for j in range(r + 1, rows + 1):
E[i, j] = E[i, j] + m * E[r, j]
    x = None
    if isSingular:
        print("Matrix is singular")
    elif np.isclose(E[rows-1, rows-1], 0):
        print("There is no unique solution")
    else:
        y = E[:, rows]
        E = np.delete(E, [rows], axis=1)
        x = BackSubstitution(E, y)
    # x stays None whenever no unique solution could be computed
    return {'E': E, 'x': x, 'isSingular': isSingular}
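# Illustrative usage sketch (not part of the original file): solving a small 3x3 system
# with partial pivoting. The system below is a standard textbook example whose exact
# solution is [2, 3, -1].
def _demo_gauss_elimination():
    A_demo = np.array([[2., 1., -1.],
                       [-3., -1., 2.],
                       [-2., 1., 2.]])
    b_demo = np.array([8., -11., -3.])
    result = GaussElimination(A_demo, b_demo, pivot='partial')
    return result['x']  # approximately array([2., 3., -1.])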
def LU(A):
rows, cols = A.shape
assert (rows == cols)
U = np.copy(A)
L = np.identity(rows)
for k in range(rows-1):
for j in range(k+1, rows):
L[j, k] = U[j, k] / U[k, k]
for p in range(k, rows):
U[j, p] = U[j, p] - L[j, k] * U[k, p]
return {'L': L, 'U': U}
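# Illustrative usage sketch (not part of the original file): factorising a made-up matrix
# and measuring how well L @ U reproduces it. Note that LU() performs no pivoting, so the
# leading principal minors of the input must be non-zero for the factorisation to exist.
def _demo_lu_factorisation():
    A_demo = np.array([[4., 3., 0.],
                       [6., 3., 1.],
                       [0., 2., 5.]])
    factors = LU(A_demo)
    reconstruction_error = np.max(np.abs(np.dot(factors['L'], factors['U']) - A_demo))
    return factors['L'], factors['U'], reconstruction_error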
def inverseU(U):
rows = U.shape[0]
Uinv = np.zeros(U.shape)
unit = np.identity(rows)
for j in range(rows):
b = unit[:, j]
Uinv[:, j] = BackSubstitution(U, b)
return Uinv
def IterativeMethod(A, d, tol, tau=0.1, N=None):
rows = A.shape[0]
Dinv = np.diag(1./A.diagonal())
CL = -np.tril(A, -1)
CU = -np.triu(A, 1)
L = np.dot(Dinv, CL)
U = np.dot(Dinv, CU)
xk = d
M = np.identity(rows) - U
Minv = inverseU(M)
C = np.dot(Dinv, d)
C = tau * np.dot(Minv, C)
T = (1.-tau) * np.identity(rows) + tau * np.dot(Minv, L)
i = 1
while (True if N is None else (i < N)):
x = np.dot(T, xk) + C
dx = x - xk # Check if scheme converges
        if np.sqrt(np.dot(dx, dx)) < tol:  # assumed convergence check: stop once the step is below tol
            break
        xk = x
        i += 1
    return x
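# Illustrative usage sketch (not part of the original file): applying IterativeMethod to a
# made-up, strictly diagonally dominant system, for which the damped scheme above can be
# expected to converge towards the solution of A x = d.
def _demo_iterative_method():
    A_demo = np.array([[10., 1., 2.],
                       [1., 12., -1.],
                       [2., -1., 15.]])
    d_demo = np.array([13., 12., 16.])
    return IterativeMethod(A_demo, d_demo, tol=1e-10)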
import math
import numpy as np
import hes5
from numpy import number
import os.path
from numba import jit
# suppresses annoying performance warnings about np.dot() being
# faster on contiguous arrays. should look at fixing it but this
# is good for now
from numba.core.errors import NumbaPerformanceWarning
import warnings
warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
from scipy.stats import gamma, multivariate_normal, uniform
import multiprocessing as mp
def kalman_filter(protein_at_observations,model_parameters,measurement_variance,derivative=True):
"""
    Perform a Kalman-Bucy filter based on observations of protein
copy numbers. This implements the filter described by Calderazzo et al., Bioinformatics (2018).
Parameters
----------
protein_at_observations : numpy array.
Observed protein. The dimension is n x 2, where n is the number of observation time points.
The first column is the time, and the second column is the observed protein copy number at
that time. The filter assumes that observations are generated with a fixed, regular time interval.
model_parameters : numpy array.
An array containing the model parameters in the following order:
repression_threshold, hill_coefficient, mRNA_degradation_rate,
protein_degradation_rate, basal_transcription_rate, translation_rate,
transcription_delay.
measurement_variance : float.
The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018).
derivative : bool.
True if you want derivative calculations, False if not.
Returns
-------
state_space_mean : numpy array.
An array of dimension n x 3, where n is the number of inferred time points.
The first column is time, the second column is the mean mRNA, and the third
column is the mean protein. Time points are generated every minute
state_space_variance : numpy array.
An array of dimension 2n x 2n.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
          d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
predicted_observation_distributions : numpy array.
An array of dimension n x 3 where n is the number of observation time points.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at each time point, respectively.
predicted_observation_mean_derivatives : numpy array.
An array of dimension n x m x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space mean at each observation time point, wrt each parameter
predicted_observation_variance_derivatives : numpy array.
An array of dimension n x m x 2 x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space variance at each observation time point, wrt each parameter
"""
time_delay = model_parameters[6]
if protein_at_observations.reshape(-1,2).shape[0] == 1:
number_of_observations = 1
observation_time_step = 10.0
else:
number_of_observations = protein_at_observations.shape[0]
observation_time_step = protein_at_observations[1,0]-protein_at_observations[0,0]
# This is the time step dt in the forward euler scheme
discretisation_time_step = 1.0
# This is the delay as an integer multiple of the discretization timestep so that we can index with it
discrete_delay = int(np.around(time_delay/discretisation_time_step))
number_of_hidden_states = int(np.around(observation_time_step/discretisation_time_step))
initial_number_of_states = discrete_delay + 1
total_number_of_states = initial_number_of_states + (number_of_observations - 1)*number_of_hidden_states
# scaling factors for mRNA and protein respectively. For example, observation might be fluorescence,
# so the scaling would correspond to how light intensity relates to molecule number.
observation_transform = np.array([0.0,1.0])
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives = kalman_filter_state_space_initialisation(protein_at_observations,
model_parameters,
measurement_variance,
derivative)
# loop through observations and at each observation apply the Kalman prediction step and then the update step
# for observation_index, current_observation in enumerate(protein_at_observations[1:]):
for observation_index in range(len(protein_at_observations)-1):
if number_of_observations != 1:
current_observation = protein_at_observations[1+observation_index,:]
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative = kalman_prediction_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
model_parameters,
observation_time_step,
derivative)
current_number_of_states = int(np.around(current_observation[0]/observation_time_step))*number_of_hidden_states + initial_number_of_states
# between the prediction and update steps we record the mean and sd for our likelihood, and the derivatives of the mean and variance for the
# derivative of the likelihood wrt the parameters
predicted_observation_distributions[observation_index + 1] = kalman_observation_distribution_parameters(predicted_observation_distributions,
current_observation,
state_space_mean,
state_space_variance,
current_number_of_states,
total_number_of_states,
measurement_variance,
observation_index)
if derivative:
predicted_observation_mean_derivatives[observation_index + 1], predicted_observation_variance_derivatives[observation_index + 1] = kalman_observation_derivatives(predicted_observation_mean_derivatives,
predicted_observation_variance_derivatives,
current_observation,
state_space_mean_derivative,
state_space_variance_derivative,
current_number_of_states,
total_number_of_states,
observation_index)
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative = kalman_update_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
time_delay,
observation_time_step,
measurement_variance,
derivative)
return state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives
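# Illustrative usage sketch (not part of the original module): made-up observations and
# parameter values, chosen only to show the expected array shapes and the call signature
# of kalman_filter. Running it requires the full module, including the hes5 dependency
# imported above; none of the numbers below are meaningful.
def _example_kalman_filter_call():
    observation_times = np.arange(0.0, 100.0, 10.0)                  # regular 10-minute sampling
    observed_protein = 1000.0 + 50.0*np.random.randn(len(observation_times))
    protein_at_observations = np.column_stack((observation_times, observed_protein))
    model_parameters = np.array([3000.0,          # repression_threshold
                                 5.0,             # hill_coefficient
                                 np.log(2)/30.0,  # mRNA_degradation_rate
                                 np.log(2)/90.0,  # protein_degradation_rate
                                 1.0,             # basal_transcription_rate
                                 1.0,             # translation_rate
                                 29.0])           # transcription_delay
    measurement_variance = 100.0
    return kalman_filter(protein_at_observations,
                         model_parameters,
                         measurement_variance,
                         derivative=False)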
def kalman_filter_state_space_initialisation(protein_at_observations,model_parameters,measurement_variance,derivative=True):
"""
A function for initialisation of the state space mean and variance, and update for the "negative" times that
are a result of the time delay. Initialises the negative times using the steady state of the deterministic system,
and then updates them with kalman_update_step.
Parameters
----------
protein_at_observations : numpy array.
Observed protein. The dimension is n x 2, where n is the number of observation time points.
The first column is the time, and the second column is the observed protein copy number at
that time. The filter assumes that observations are generated with a fixed, regular time interval.
model_parameters : numpy array.
An array containing the model parameters in the following order:
repression_threshold, hill_coefficient, mRNA_degradation_rate,
protein_degradation_rate, basal_transcription_rate, translation_rate,
transcription_delay.
measurement_variance : float.
The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018).
Returns
-------
state_space_mean : numpy array.
An array of dimension n x 3, where n is the number of inferred time points.
The first column is time, the second column is the mean mRNA, and the third
column is the mean protein. Time points are generated every minute
state_space_variance : numpy array.
An array of dimension 2n x 2n.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
          d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
predicted_observation_distributions : numpy array.
An array of dimension n x 3 where n is the number of observation time points.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at each time point, respectively
predicted_observation_mean_derivatives : numpy array.
An array of dimension n x m x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space mean at each observation time point, wrt each parameter
predicted_observation_variance_derivatives : numpy array.
An array of dimension n x m x 2 x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space variance at each observation time point, wrt each parameter
"""
time_delay = model_parameters[6]
# This is the time step dt in the forward euler scheme
discretisation_time_step = 1.0
# This is the delay as an integer multiple of the discretization timestep so that we can index with it
discrete_delay = int(np.around(time_delay/discretisation_time_step))
if protein_at_observations.reshape(-1,2).shape[0] == 1:
observation_time_step = 10.0
number_of_observations = 1
else:
observation_time_step = protein_at_observations[1,0]-protein_at_observations[0,0]
number_of_observations = protein_at_observations.shape[0]
# 'synthetic' observations, which allow us to update backwards in time
number_of_hidden_states = int(np.around(observation_time_step/discretisation_time_step))
## initialise "negative time" with the mean and standard deviations of the LNA
initial_number_of_states = discrete_delay + 1
total_number_of_states = initial_number_of_states + (number_of_observations - 1)*number_of_hidden_states
state_space_mean = np.zeros((total_number_of_states,3))
state_space_mean[:initial_number_of_states,(1,2)] = hes5.calculate_steady_state_of_ode(repression_threshold=model_parameters[0],
hill_coefficient=model_parameters[1],
mRNA_degradation_rate=model_parameters[2],
protein_degradation_rate=model_parameters[3],
basal_transcription_rate=model_parameters[4],
translation_rate=model_parameters[5])
if protein_at_observations.reshape(-1,2).shape[0] == 1:
final_observation_time = 0
else:
final_observation_time = protein_at_observations[-1,0]
# assign time entries
state_space_mean[:,0] = np.linspace(protein_at_observations[0,0]-discrete_delay,final_observation_time,total_number_of_states)
# initialise initial covariance matrix
state_space_variance = np.zeros((2*(total_number_of_states),2*(total_number_of_states)))
# the top left block of the matrix corresponds to the mRNA covariance, see docstring above
initial_mRNA_scaling = 20.0
initial_mRNA_variance = state_space_mean[0,1]*initial_mRNA_scaling
np.fill_diagonal( state_space_variance[:initial_number_of_states,:initial_number_of_states] , initial_mRNA_variance)
    # the bottom right block of the matrix corresponds to the protein covariance, see docstring above
initial_protein_scaling = 100.0
initial_protein_variance = state_space_mean[0,2]*initial_protein_scaling
np.fill_diagonal( state_space_variance[total_number_of_states:total_number_of_states + initial_number_of_states,
total_number_of_states:total_number_of_states + initial_number_of_states] , initial_protein_variance )
observation_transform = np.array([0.0,1.0])
predicted_observation_distributions = np.zeros((number_of_observations,3))
predicted_observation_distributions[0,0] = 0
predicted_observation_distributions[0,1] = observation_transform.dot(state_space_mean[initial_number_of_states-1,1:3])
# making it numba-ready
last_predicted_covariance_matrix = np.zeros((2,2))
for short_row_index, long_row_index in enumerate([initial_number_of_states-1,
total_number_of_states+initial_number_of_states-1]):
for short_column_index, long_column_index in enumerate([initial_number_of_states -1,
total_number_of_states+initial_number_of_states-1]):
last_predicted_covariance_matrix[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
predicted_observation_distributions[0,2] = (observation_transform.dot(
last_predicted_covariance_matrix).dot(observation_transform.transpose())
+
measurement_variance)
####################################################################
####################################################################
##
## initialise derivative arrays
##
####################################################################
####################################################################
#
state_space_mean_derivative = np.zeros((total_number_of_states,7,2))
state_space_variance_derivative = np.zeros((7,2*total_number_of_states,2*total_number_of_states))
predicted_observation_mean_derivatives = np.zeros((number_of_observations,7,2))
predicted_observation_mean_derivatives[0] = state_space_mean_derivative[initial_number_of_states-1]
predicted_observation_variance_derivatives = np.zeros((number_of_observations,7,2,2))
if derivative:
state_space_mean_derivative = np.zeros((total_number_of_states,7,2))
repression_threshold = model_parameters[0]
hill_coefficient = model_parameters[1]
mRNA_degradation_rate = model_parameters[2]
protein_degradation_rate = model_parameters[3]
basal_transcription_rate = model_parameters[4]
translation_rate = model_parameters[5]
transcription_delay = model_parameters[6]
steady_state_protein = state_space_mean[0,2]
hill_function_value = 1.0/(1.0+np.power(steady_state_protein/repression_threshold,hill_coefficient))
hill_function_derivative_value_wrt_protein = - hill_coefficient*np.power(steady_state_protein/repression_threshold,
hill_coefficient - 1)/( repression_threshold*
np.power(1.0+np.power( steady_state_protein/repression_threshold,
hill_coefficient),2))
protein_derivative_denominator_scalar = (basal_transcription_rate*translation_rate)/(mRNA_degradation_rate*protein_degradation_rate)
initial_protein_derivative_denominator = (protein_derivative_denominator_scalar*hill_function_derivative_value_wrt_protein) - 1
# assign protein derivative first, since mRNA derivative is given as a function of protein derivative
hill_function_derivative_value_wrt_repression = hill_coefficient*np.power(steady_state_protein/repression_threshold,
hill_coefficient)/( repression_threshold*
np.power(1.0+np.power( steady_state_protein/repression_threshold,
hill_coefficient),2))
hill_function_derivative_value_wrt_hill_coefficient = - np.log(steady_state_protein/repression_threshold)*np.power(steady_state_protein/repression_threshold,
hill_coefficient)/( np.power(1.0+np.power( steady_state_protein/repression_threshold,
hill_coefficient),2))
# repression threshold
state_space_mean_derivative[:initial_number_of_states,0,1] = - (protein_derivative_denominator_scalar*hill_function_derivative_value_wrt_repression)/(
initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,0,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,0,1]
# hill coefficient
state_space_mean_derivative[:initial_number_of_states,1,1] = - (protein_derivative_denominator_scalar*hill_function_derivative_value_wrt_hill_coefficient)/(
initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,1,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,1,1]
# mRNA degradation
state_space_mean_derivative[:initial_number_of_states,2,1] = (protein_derivative_denominator_scalar*hill_function_value)/(
mRNA_degradation_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,2,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,2,1]
# protein degradation
state_space_mean_derivative[:initial_number_of_states,3,1] = (protein_derivative_denominator_scalar*hill_function_value)/(
protein_degradation_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,3,0] = (steady_state_protein + protein_degradation_rate*state_space_mean_derivative[0,3,1])/translation_rate
# basal transcription
state_space_mean_derivative[:initial_number_of_states,4,1] = -(protein_derivative_denominator_scalar*hill_function_value)/(
basal_transcription_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,4,0] = (protein_degradation_rate/translation_rate)*state_space_mean_derivative[0,4,1]
# translation
state_space_mean_derivative[:initial_number_of_states,5,1] = -(protein_derivative_denominator_scalar*hill_function_value)/(
translation_rate*initial_protein_derivative_denominator)
state_space_mean_derivative[:initial_number_of_states,5,0] = -(protein_degradation_rate/translation_rate)*((steady_state_protein/translation_rate) -
state_space_mean_derivative[0,5,1])
# transcriptional delay
state_space_mean_derivative[:initial_number_of_states,6,1] = 0
state_space_mean_derivative[:initial_number_of_states,6,0] = 0
state_space_variance_derivative = np.zeros((7,2*total_number_of_states,2*total_number_of_states))
for parameter_index in range(7):
np.fill_diagonal(state_space_variance_derivative[parameter_index,:initial_number_of_states,:initial_number_of_states],
initial_mRNA_scaling*state_space_mean_derivative[0,parameter_index,0])
np.fill_diagonal(state_space_variance_derivative[parameter_index,
total_number_of_states:total_number_of_states + initial_number_of_states,
total_number_of_states:total_number_of_states + initial_number_of_states],
initial_protein_scaling*state_space_mean_derivative[0,parameter_index,1])
predicted_observation_mean_derivatives = np.zeros((number_of_observations,7,2))
predicted_observation_mean_derivatives[0] = state_space_mean_derivative[initial_number_of_states-1]
predicted_observation_variance_derivatives = np.zeros((number_of_observations,7,2,2))
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([initial_number_of_states-1,
total_number_of_states+initial_number_of_states-1]):
for short_column_index, long_column_index in enumerate([initial_number_of_states -1,
total_number_of_states+initial_number_of_states-1]):
predicted_observation_variance_derivatives[0,parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# update the past ("negative time")
if protein_at_observations.reshape(-1,2).shape[0] == 1:
current_observation = protein_at_observations
else:
current_observation = protein_at_observations[0]
state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative = kalman_update_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
time_delay,
observation_time_step,
measurement_variance,
derivative)
return state_space_mean, state_space_variance, state_space_mean_derivative, state_space_variance_derivative, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives
# @jit(nopython = True)
def kalman_observation_distribution_parameters(predicted_observation_distributions,
current_observation,
state_space_mean,
state_space_variance,
current_number_of_states,
total_number_of_states,
measurement_variance,
observation_index):
"""
A function which updates the mean and variance for the distributions which describe the likelihood of
our observations, given some model parameters.
Parameters
----------
predicted_observation_distributions : numpy array.
An array of dimension n x 3 where n is the number of observation time points.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at each time point, respectively
current_observation : int.
Observed protein at the current time. The dimension is 1 x 2.
The first column is the time, and the second column is the observed protein copy number at
that time
state_space_mean : numpy array
An array of dimension n x 3, where n is the number of inferred time points.
The first column is time, the second column is the mean mRNA, and the third
column is the mean protein. Time points are generated every minute
state_space_variance : numpy array.
An array of dimension 2n x 2n.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ]
current_number_of_states : float.
        The current number of (hidden and observed) states up to the current observation time point.
This includes the initial states (with negative time).
total_number_of_states : float.
The total number of states that will be predicted by the kalman_filter function
measurement_variance : float.
The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018).
observation_index : int.
The index for the current observation time in the main kalman_filter loop
Returns
-------
predicted_observation_distributions[observation_index + 1] : numpy array.
An array of dimension 1 x 3.
The first column is time, the second and third columns are the mean and variance
of the distribution of the expected observations at the current time point, respectively.
"""
observation_transform = np.array([0.0,1.0])
predicted_observation_distributions[observation_index+1,0] = current_observation[0]
predicted_observation_distributions[observation_index+1,1] = observation_transform.dot(state_space_mean[current_number_of_states-1,1:3])
# not using np.ix_-like indexing to make it numba-ready
last_predicted_covariance_matrix = np.zeros((2,2))
for short_row_index, long_row_index in enumerate([current_number_of_states-1,
total_number_of_states+current_number_of_states-1]):
for short_column_index, long_column_index in enumerate([current_number_of_states -1,
total_number_of_states+current_number_of_states-1]):
last_predicted_covariance_matrix[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
predicted_observation_distributions[observation_index+1,2] = (observation_transform.dot(
last_predicted_covariance_matrix).dot(observation_transform.transpose())
+
measurement_variance)
return predicted_observation_distributions[observation_index + 1]
# @jit(nopython = True)
def kalman_observation_derivatives(predicted_observation_mean_derivatives,
predicted_observation_variance_derivatives,
current_observation,
state_space_mean_derivative,
state_space_variance_derivative,
current_number_of_states,
total_number_of_states,
observation_index):
"""
Parameters
----------
predicted_observation_mean_derivatives : numpy array.
An array of dimension n x m x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space mean at each observation time point, wrt each parameter
predicted_observation_variance_derivatives : numpy array.
An array of dimension n x m x 2 x 2, where n is the number of observation time points,
and m is the number of parameters. This gives the (non-updated) predicted derivative of the state
space variance at each observation time point, wrt each parameter
current_observation : numpy array.
A 1 x 2 array which describes the observation of protein at the current time point. The first
column is time, and the second column is the protein level
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
          d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
current_number_of_states : float.
        The current number of (hidden and observed) states up to the current observation time point.
This includes the initial states (with negative time).
total_number_of_states : float.
The total number of (observed and hidden) states, used to index the variance matrix
observation_index : int.
The index for the current observation time in the main kalman_filter loop
Returns
-------
predicted_observation_mean_derivatives[observation_index + 1] : numpy array.
An array of dimension 7 x 2, which contains the derivative of the mean mRNA
and protein wrt each parameter at the current observation time point
predicted_observation_variance_derivatives[observation_index + 1] : numpy array.
An array of dimension 7 x 2 x 2, which describes the derivative of the state
space variance wrt each parameter for the current time point
"""
predicted_observation_mean_derivatives[observation_index+1] = state_space_mean_derivative[current_number_of_states-1]
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([current_number_of_states-1,
total_number_of_states+current_number_of_states-1]):
for short_column_index, long_column_index in enumerate([current_number_of_states-1,
total_number_of_states+current_number_of_states-1]):
predicted_observation_variance_derivatives[observation_index+1,parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
return predicted_observation_mean_derivatives[observation_index + 1], predicted_observation_variance_derivatives[observation_index + 1]
# @jit(nopython = True)
def kalman_prediction_step(state_space_mean,
state_space_variance,
state_space_mean_derivative,
state_space_variance_derivative,
current_observation,
model_parameters,
observation_time_step,
derivative):
"""
Perform the Kalman filter prediction about future observation, based on current knowledge i.e. current
state space mean and variance. This gives rho_{t+\delta t-tau:t+\delta t} and P_{t+\delta t-tau:t+\delta t},
using the differential equations in supplementary section 4 of Calderazzo et al., Bioinformatics (2018),
approximated using a forward Euler scheme.
TODO: update variable descriptions
Parameters
----------
state_space_mean : numpy array.
The dimension is n x 3, where n is the number of states until the current time.
The first column is time, the second column is mean mRNA, and the third column is mean protein. It
represents the information based on observations we have already made.
state_space_variance : numpy array.
The dimension is 2n x 2n, where n is the number of states until the current time. The definition
is identical to the one provided in the Kalman filter function, i.e.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
          d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
current_observation : numpy array.
The dimension is 1 x 2, where the first entry is time, and the second is the protein observation.
model_parameters : numpy array.
An array containing the model parameters. The order is identical to the one provided in the
Kalman filter function documentation, i.e.
repression_threshold, hill_coefficient, mRNA_degradation_rate,
protein_degradation_rate, basal_transcription_rate, translation_rate,
transcription_delay.
observation_time_step : float.
This gives the time between each experimental observation. This is required to know how far
the function should predict.
Returns
-------
predicted_state_space_mean : numpy array.
The dimension is n x 3, where n is the number of previous observations until the current time.
The first column is time, the second column is mean mRNA, and the third column is mean protein.
predicted_state_space_variance : numpy array.
The dimension is 2n x 2n, where n is the number of previous observations until the current time.
[ cov( mRNA(t0:tn),mRNA(t0:tn) ), cov( protein(t0:tn),mRNA(t0:tn) ),
cov( mRNA(t0:tn),protein(t0:tn) ), cov( protein(t0:tn),protein(t0:tn) ]
state_space_mean_derivative : numpy array.
An array of dimension n x m x 2, where n is the number of inferred time points,
and m is the number of parameters. The m columns in the second dimension are the
derivative of the state mean with respect to each parameter. The two elements in
the third dimension represent the derivative of mRNA and protein respectively
state_space_variance_derivative : numpy array.
An array of dimension 7 x 2n x 2n.
[ d[cov( mRNA(t0:tn),mRNA(t0:tn) )]/d_theta, d[cov( protein(t0:tn),mRNA(t0:tn) )]/d_theta,
          d[cov( mRNA(t0:tn),protein(t0:tn) )]/d_theta, d[cov( protein(t0:tn),protein(t0:tn) )]/d_theta ]
"""
# This is the time step dt in the forward euler scheme
discretisation_time_step = 1.0
## name the model parameters
repression_threshold = model_parameters[0]
hill_coefficient = model_parameters[1]
mRNA_degradation_rate = model_parameters[2]
protein_degradation_rate = model_parameters[3]
basal_transcription_rate = model_parameters[4]
translation_rate = model_parameters[5]
transcription_delay = model_parameters[6]
discrete_delay = int(np.around(transcription_delay/discretisation_time_step))
number_of_hidden_states = int(np.around(observation_time_step/discretisation_time_step))
# this is the number of states at t, i.e. before predicting towards t+observation_time_step
current_number_of_states = (int(np.around(current_observation[0]/observation_time_step))-1)*number_of_hidden_states + discrete_delay+1
total_number_of_states = state_space_mean.shape[0]
## next_time_index corresponds to 't+Deltat' in the propagation equation on page 5 of the supplementary
## material in the calderazzo paper
# we initialise all our matrices outside of the main for loop for improved performance
# this is P(t,t)
current_covariance_matrix = np.zeros((2,2))
# this is P(t-\tau,t) in page 5 of the supplementary material of Calderazzo et. al.
covariance_matrix_past_to_now = np.zeros((2,2))
# this is P(t,t-\tau) in page 5 of the supplementary material of Calderazzo et. al.
covariance_matrix_now_to_past = np.zeros((2,2))
# This corresponds to P(s,t) in the Calderazzo paper
covariance_matrix_intermediate_to_current = np.zeros((2,2))
# This corresponds to P(s,t-tau)
covariance_matrix_intermediate_to_past = np.zeros((2,2))
# this is d_rho(t)/d_theta
next_mean_derivative = np.zeros((7,2))
# this is d_P(t,t)/d_theta
current_covariance_derivative_matrix = np.zeros((7,2,2))
# this is d_P(t-\tau,t)/d_theta
covariance_derivative_matrix_past_to_now = np.zeros((7,2,2))
# this is d_P(t,t-\tau)/d_theta
covariance_derivative_matrix_now_to_past = np.zeros((7,2,2))
# d_P(t+Deltat,t+Deltat)/d_theta
next_covariance_derivative_matrix = np.zeros((7,2,2))
# initialisation for the common part of the derivative of P(t,t) for each parameter
common_state_space_variance_derivative_element = np.zeros((7,2,2))
# This corresponds to d_P(s,t)/d_theta in the Calderazzo paper
covariance_matrix_derivative_intermediate_to_current = np.zeros((7,2,2))
# This corresponds to d_P(s,t-tau)/d_theta
covariance_matrix_derivative_intermediate_to_past = np.zeros((7,2,2))
# This corresponds to d_P(s,t+Deltat)/d_theta in the Calderazzo paper
covariance_matrix_derivative_intermediate_to_next = np.zeros((7,2,2))
# initialisation for the common part of the derivative of P(s,t) for each parameter
common_intermediate_state_space_variance_derivative_element = np.zeros((7,2,2))
# derivations for the following are found in Calderazzo et. al. (2018)
# g([M(t), P(t)]) = [[-mRNA_degradation_rate, 0],
#                    [translation_rate, -protein_degradation_rate]] . [M(t), P(t)]^T
# and its derivative will be called instant_jacobian
# f([M(t-tau), P(t-tau)]) = [basal_transcription_rate*hill_function(past_protein), 0]^T
# and its derivative with respect to the past state will be called delayed_jacobian
# the matrix A in the paper will be called variance_of_noise
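# In the forward-Euler prediction below these give the updates
#   rho(t+dt)     = rho(t) + dt*( instant_jacobian . rho(t) + f(p(t-tau)) )
#   P(t+dt, t+dt) = P(t,t) + dt*( instant_jacobian . P(t,t) + P(t,t) . instant_jacobian^T
#                                 + delayed_jacobian . P(t-tau,t) + P(t,t-tau) . delayed_jacobian^T
#                                 + variance_of_noise )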
instant_jacobian = np.array([[-mRNA_degradation_rate,0.0],[translation_rate,-protein_degradation_rate]])
instant_jacobian_transpose = np.transpose(instant_jacobian)
for ii, next_time_index in enumerate(range(current_number_of_states, current_number_of_states + number_of_hidden_states)):
current_time_index = next_time_index - 1 # this corresponds to t
past_time_index = current_time_index - discrete_delay # this corresponds to t-tau
# indexing with 1:3 for numba
current_mean = state_space_mean[current_time_index,1:3]
past_protein = state_space_mean[past_time_index,2]
past_mRNA = state_space_mean[past_time_index,1]
hill_function_value = 1.0/(1.0+np.power(past_protein/repression_threshold,hill_coefficient))
hill_function_derivative_value = - hill_coefficient*np.power(past_protein/repression_threshold,
hill_coefficient - 1)/( repression_threshold*
np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),2))
# jacobian of f is derivative of f with respect to past state ([past_mRNA, past_protein])
delayed_jacobian = np.array([[0.0,basal_transcription_rate*hill_function_derivative_value],[0.0,0.0]])
delayed_jacobian_transpose = np.transpose(delayed_jacobian)
## derivative of mean is contributions from instant reactions + contributions from past reactions
derivative_of_mean = ( np.array([[-mRNA_degradation_rate,0.0],
[translation_rate,-protein_degradation_rate]]).dot(current_mean) +
np.array([basal_transcription_rate*hill_function_value,0]) )
next_mean = current_mean + discretisation_time_step*derivative_of_mean
# ensures the prediction is non negative
next_mean = np.maximum(next_mean,0)
# indexing with 1:3 for numba
state_space_mean[next_time_index,1:3] = next_mean
# in the next lines we use for loop instead of np.ix_-like indexing for numba
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
current_covariance_matrix[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
# this is P(t-\tau,t) in page 5 of the supplementary material of Calderazzo et. al
for short_row_index, long_row_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
covariance_matrix_past_to_now[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
# this is P(t,t-\tau) in page 5 of the supplementary material of Calderazzo et. al.
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
covariance_matrix_now_to_past[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
variance_change_current_contribution = ( instant_jacobian.dot(current_covariance_matrix) +
current_covariance_matrix.dot(instant_jacobian_transpose) )
variance_change_past_contribution = ( delayed_jacobian.dot(covariance_matrix_past_to_now) +
covariance_matrix_now_to_past.dot(delayed_jacobian_transpose) )
variance_of_noise = np.array([[mRNA_degradation_rate*current_mean[0]+basal_transcription_rate*hill_function_value,0],
[0,translation_rate*current_mean[0]+protein_degradation_rate*current_mean[1]]])
derivative_of_variance = ( variance_change_current_contribution +
variance_change_past_contribution +
variance_of_noise )
# P(t+Deltat,t+Deltat)
next_covariance_matrix = current_covariance_matrix + discretisation_time_step*derivative_of_variance
# ensure that the diagonal entries are non negative
np.fill_diagonal(next_covariance_matrix,np.maximum(np.diag(next_covariance_matrix),0))
# in the next lines we use for loop instead of np.ix_-like indexing for numba
for short_row_index, long_row_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
for short_column_index, long_column_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
state_space_variance[long_row_index,long_column_index] = next_covariance_matrix[short_row_index,
short_column_index]
## now we need to update the cross correlations, P(s,t) in the Calderazzo paper
# the range needs to include t, since we want to propagate P(t,t) into P(t,t+Deltat)
for intermediate_time_index in range(past_time_index,current_time_index+1):
# This corresponds to P(s,t) in the Calderazzo paper
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
covariance_matrix_intermediate_to_current[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
# This corresponds to P(s,t-tau)
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
covariance_matrix_intermediate_to_past[short_row_index,short_column_index] = state_space_variance[long_row_index,
long_column_index]
covariance_derivative = ( covariance_matrix_intermediate_to_current.dot( instant_jacobian_transpose) +
covariance_matrix_intermediate_to_past.dot(delayed_jacobian_transpose))
# This corresponds to P(s,t+Deltat) in the Calderazzo paper
covariance_matrix_intermediate_to_next = covariance_matrix_intermediate_to_current + discretisation_time_step*covariance_derivative
# Fill in the big matrix
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
state_space_variance[long_row_index,long_column_index] = covariance_matrix_intermediate_to_next[short_row_index,
short_column_index]
# Fill in the big matrix with transpose arguments, i.e. P(t+Deltat, s) - works if initialised symmetrically
for short_row_index, long_row_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
for short_column_index, long_column_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
state_space_variance[long_row_index,long_column_index] = covariance_matrix_intermediate_to_next[short_column_index,
short_row_index]
#################################
####
#### prediction step for the derivatives of the state space mean and variance wrt each parameter
####
#################################
###
### state space mean derivatives
###
if derivative:
# indexing with 1:3 for numba
current_mean_derivative = state_space_mean_derivative[current_time_index,:,0:2]
past_mean_derivative = state_space_mean_derivative[past_time_index,:,0:2]
past_protein_derivative = state_space_mean_derivative[past_time_index,:,1]
# calculate predictions for derivative of mean wrt each parameter
# repression threshold
hill_function_derivative_value_wrt_repression = hill_coefficient*np.power(past_protein/repression_threshold,
hill_coefficient)/( repression_threshold*
np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),
2))
repression_derivative = ( instant_jacobian.dot(current_mean_derivative[0]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[0]).reshape((2,1)) +
np.array([[basal_transcription_rate*hill_function_derivative_value_wrt_repression],[0.0]]) )
next_mean_derivative[0] = current_mean_derivative[0] + discretisation_time_step*(repression_derivative.reshape((1,2)))
# hill coefficient
hill_function_derivative_value_wrt_hill_coefficient = - np.log(past_protein/repression_threshold)*np.power(past_protein/repression_threshold,
hill_coefficient)/( np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),2))
hill_coefficient_derivative = ( instant_jacobian.dot(current_mean_derivative[1]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[1]).reshape((2,1)) +
np.array(([[basal_transcription_rate*hill_function_derivative_value_wrt_hill_coefficient],[0.0]])) )
next_mean_derivative[1] = current_mean_derivative[1] + discretisation_time_step*(hill_coefficient_derivative.reshape((1,2)))
# mRNA degradation rate
mRNA_degradation_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[2]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[2]).reshape((2,1)) +
np.array(([[-current_mean[0]],[0.0]])) )
next_mean_derivative[2] = current_mean_derivative[2] + discretisation_time_step*(mRNA_degradation_rate_derivative.reshape((1,2)))
# protein degradation rate
protein_degradation_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[3]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[3]).reshape((2,1)) +
np.array(([[0.0],[-current_mean[1]]])) )
next_mean_derivative[3] = current_mean_derivative[3] + discretisation_time_step*(protein_degradation_rate_derivative.reshape((1,2)))
# basal transcription rate
basal_transcription_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[4]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[4]).reshape((2,1)) +
np.array(([[hill_function_value],[0.0]])) )
next_mean_derivative[4] = current_mean_derivative[4] + discretisation_time_step*(basal_transcription_rate_derivative.reshape((1,2)))
# translation rate
translation_rate_derivative = ( instant_jacobian.dot(current_mean_derivative[5]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[5]).reshape((2,1)) +
np.array(([[0.0],[current_mean[0]]])) )
next_mean_derivative[5] = current_mean_derivative[5] + discretisation_time_step*(translation_rate_derivative.reshape((1,2)))
# transcriptional delay
transcription_delay_derivative = ( instant_jacobian.dot(current_mean_derivative[6]).reshape((2,1)) +
delayed_jacobian.dot(past_mean_derivative[6]).reshape((2,1)) +
np.array(([[-basal_transcription_rate*hill_function_derivative_value*(
translation_rate*past_mRNA - protein_degradation_rate*past_protein)],[0.0]])) )
next_mean_derivative[6] = current_mean_derivative[6] + discretisation_time_step*(transcription_delay_derivative.reshape((1,2)))
# assign the predicted derivatives to our state_space_mean_derivative array
state_space_mean_derivative[next_time_index] = next_mean_derivative
###
### state space variance derivatives
###
# in the next lines we use for loop instead of np.ix_-like indexing for numba
# this is d_P(t,t)/d_theta
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
current_covariance_derivative_matrix[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# this is d_P(t-\tau,t)/d_theta
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
covariance_derivative_matrix_past_to_now[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# this is d_P(t,t-\tau)/d_theta
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
for short_column_index, long_column_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
covariance_derivative_matrix_now_to_past[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
## d_P(t+Deltat,t+Deltat)/d_theta
# the derivative is quite long and slightly different for each parameter, meaning it's difficult to
# code this part with a loop. For each parameter we divide it in to it's constituent parts. There is one
# main part in common for every derivative which is defined here as common_state_space_variance_derivative_element
for parameter_index in range(7):
common_state_space_variance_derivative_element[parameter_index] = ( np.dot(instant_jacobian,
current_covariance_derivative_matrix[parameter_index]) +
np.dot(current_covariance_derivative_matrix[parameter_index],
instant_jacobian_transpose) +
np.dot(delayed_jacobian,
covariance_derivative_matrix_past_to_now[parameter_index]) +
np.dot(covariance_derivative_matrix_now_to_past[parameter_index],
delayed_jacobian_transpose) )
hill_function_second_derivative_value = hill_coefficient*np.power(past_protein/repression_threshold,
hill_coefficient)*(
np.power(past_protein/repression_threshold,
hill_coefficient) +
hill_coefficient*(np.power(past_protein/repression_threshold,
hill_coefficient)-1)+1)/( np.power(past_protein,2)*
np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),3))
# repression threshold
# this refers to d(f'(p(t-\tau)))/dp_0
hill_function_second_derivative_value_wrt_repression = -np.power(hill_coefficient,2)*(np.power(past_protein/repression_threshold,
hill_coefficient)-1)*np.power(past_protein/repression_threshold,
hill_coefficient-1)/( np.power(repression_threshold,2)*
(np.power(1.0+np.power( past_protein/repression_threshold,
hill_coefficient),3)))
# instant_jacobian_derivative_wrt_repression = 0
delayed_jacobian_derivative_wrt_repression = (np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[0,1]],[0.0,0.0]]) +
np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value_wrt_repression],[0.0,0.0]]) )
delayed_jacobian_derivative_wrt_repression_transpose = np.transpose(delayed_jacobian_derivative_wrt_repression)
instant_noise_derivative_wrt_repression = (np.array([[mRNA_degradation_rate*current_mean_derivative[0,0],0.0],
[0.0,translation_rate*current_mean_derivative[0,0] + protein_degradation_rate*current_mean_derivative[0,1]]]))
delayed_noise_derivative_wrt_repression = (np.array([[basal_transcription_rate*(hill_function_derivative_value*past_mean_derivative[0,1] + hill_function_derivative_value_wrt_repression),0.0],
[0.0,0.0]]))
derivative_of_variance_wrt_repression_threshold = ( common_state_space_variance_derivative_element[0] +
np.dot(delayed_jacobian_derivative_wrt_repression,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,delayed_jacobian_derivative_wrt_repression_transpose) +
instant_noise_derivative_wrt_repression + delayed_noise_derivative_wrt_repression )
next_covariance_derivative_matrix[0] = current_covariance_derivative_matrix[0] + discretisation_time_step*(derivative_of_variance_wrt_repression_threshold)
# hill coefficient
# this refers to d(f'(p(t-\tau)))/dh
hill_function_second_derivative_value_wrt_hill_coefficient = np.power(past_protein/repression_threshold,hill_coefficient)*(-np.power(past_protein/repression_threshold,hill_coefficient) +
hill_coefficient*(np.power(past_protein/repression_threshold,hill_coefficient)-1)*np.log(past_protein/repression_threshold)-1)/(
past_protein*np.power(1.0+np.power(past_protein/repression_threshold,hill_coefficient),3))
# instant_jacobian_derivative_wrt_hill_coefficient = 0
delayed_jacobian_derivative_wrt_hill_coefficient = (np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[1,1]],[0.0,0.0]]) +
np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value_wrt_hill_coefficient],[0.0,0.0]]) )
instant_noise_derivative_wrt_hill_coefficient = (np.array([[mRNA_degradation_rate*current_mean_derivative[1,0],0.0],
[0.0,translation_rate*current_mean_derivative[1,0] + protein_degradation_rate*current_mean_derivative[1,1]]]))
delayed_noise_derivative_wrt_hill_coefficient = (np.array([[basal_transcription_rate*(hill_function_derivative_value*past_mean_derivative[1,1] + hill_function_derivative_value_wrt_hill_coefficient),0.0],
[0.0,0.0]]))
derivative_of_variance_wrt_hill_coefficient = ( common_state_space_variance_derivative_element[1] +
np.dot(delayed_jacobian_derivative_wrt_hill_coefficient,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,np.transpose(delayed_jacobian_derivative_wrt_hill_coefficient)) +
instant_noise_derivative_wrt_hill_coefficient + delayed_noise_derivative_wrt_hill_coefficient )
next_covariance_derivative_matrix[1] = current_covariance_derivative_matrix[1] + discretisation_time_step*(derivative_of_variance_wrt_hill_coefficient)
# mRNA degradation rate
instant_jacobian_derivative_wrt_mRNA_degradation = np.array([[-1.0,0.0],[0.0,0.0]])
delayed_jacobian_derivative_wrt_mRNA_degradation = (np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[2,1]],[0.0,0.0]]) )
instant_noise_derivative_wrt_mRNA_degradation = (np.array([[mRNA_degradation_rate*current_mean_derivative[2,0] + current_mean[0],0.0],
[0.0,translation_rate*current_mean_derivative[2,0] + protein_degradation_rate*current_mean_derivative[2,1]]]))
delayed_noise_derivative_wrt_mRNA_degradation = (np.array([[basal_transcription_rate*(hill_function_derivative_value*past_mean_derivative[2,1]),0.0],
[0.0,0.0]]))
derivative_of_variance_wrt_mRNA_degradation = ( common_state_space_variance_derivative_element[2] +
np.dot(instant_jacobian_derivative_wrt_mRNA_degradation,current_covariance_matrix) +
np.dot(current_covariance_matrix,np.transpose(instant_jacobian_derivative_wrt_mRNA_degradation)) +
np.dot(delayed_jacobian_derivative_wrt_mRNA_degradation,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,np.transpose(delayed_jacobian_derivative_wrt_mRNA_degradation)) +
instant_noise_derivative_wrt_mRNA_degradation + delayed_noise_derivative_wrt_mRNA_degradation )
next_covariance_derivative_matrix[2] = current_covariance_derivative_matrix[2] + discretisation_time_step*(derivative_of_variance_wrt_mRNA_degradation)
# protein degradation rate
instant_jacobian_derivative_wrt_protein_degradation = np.array([[0.0,0.0],[0.0,-1.0]])
delayed_jacobian_derivative_wrt_protein_degradation = (np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[3,1]],[0.0,0.0]]) )
instant_noise_derivative_wrt_protein_degradation = (np.array([[mRNA_degradation_rate*current_mean_derivative[3,0],0.0],
[0.0,translation_rate*current_mean_derivative[3,0] + protein_degradation_rate*current_mean_derivative[3,1] + current_mean[1]]]))
delayed_noise_derivative_wrt_protein_degradation = (np.array([[basal_transcription_rate*(hill_function_derivative_value*past_mean_derivative[3,1]),0.0],
[0.0,0.0]]))
derivative_of_variance_wrt_protein_degradation = ( common_state_space_variance_derivative_element[3] +
np.dot(instant_jacobian_derivative_wrt_protein_degradation,current_covariance_matrix) +
np.dot(current_covariance_matrix,np.transpose(instant_jacobian_derivative_wrt_protein_degradation)) +
np.dot(delayed_jacobian_derivative_wrt_protein_degradation,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,np.transpose(delayed_jacobian_derivative_wrt_protein_degradation)) +
instant_noise_derivative_wrt_protein_degradation + delayed_noise_derivative_wrt_protein_degradation )
next_covariance_derivative_matrix[3] = current_covariance_derivative_matrix[3] + discretisation_time_step*(derivative_of_variance_wrt_protein_degradation)
# basal transcription rate
# instant_jacobian_derivative_wrt_basal_transcription = 0
delayed_jacobian_derivative_wrt_basal_transcription = (np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[4,1]],[0.0,0.0]]) +
np.array([[0.0,hill_function_derivative_value],[0.0,0.0]]) )
instant_noise_derivative_wrt_basal_transcription = (np.array([[mRNA_degradation_rate*current_mean_derivative[4,0],0.0],
[0.0,translation_rate*current_mean_derivative[4,0] + protein_degradation_rate*current_mean_derivative[4,1]]]))
delayed_noise_derivative_wrt_basal_transcription = (np.array([[basal_transcription_rate*hill_function_derivative_value*past_mean_derivative[4,1] + hill_function_value,0.0],
[0.0,0.0]]))
derivative_of_variance_wrt_basal_transcription = ( common_state_space_variance_derivative_element[4] +
np.dot(delayed_jacobian_derivative_wrt_basal_transcription,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,np.transpose(delayed_jacobian_derivative_wrt_basal_transcription)) +
instant_noise_derivative_wrt_basal_transcription + delayed_noise_derivative_wrt_basal_transcription )
next_covariance_derivative_matrix[4] = current_covariance_derivative_matrix[4] + discretisation_time_step*(derivative_of_variance_wrt_basal_transcription)
# translation rate
instant_jacobian_derivative_wrt_translation_rate = np.array([[0.0,0.0],[1.0,0.0]])
delayed_jacobian_derivative_wrt_translation_rate = (np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[5,1]],[0.0,0.0]]))
instant_noise_derivative_wrt_translation_rate = (np.array([[mRNA_degradation_rate*current_mean_derivative[5,0],0.0],
[0.0,translation_rate*current_mean_derivative[5,0] + protein_degradation_rate*current_mean_derivative[5,1] + current_mean[0]]]))
delayed_noise_derivative_wrt_translation_rate = (np.array([[basal_transcription_rate*hill_function_derivative_value*past_mean_derivative[5,1],0.0],
[0.0,0.0]]))
derivative_of_variance_wrt_translation_rate = ( common_state_space_variance_derivative_element[5] +
np.dot(instant_jacobian_derivative_wrt_translation_rate,current_covariance_matrix) +
np.dot(current_covariance_matrix,np.transpose(instant_jacobian_derivative_wrt_translation_rate)) +
np.dot(delayed_jacobian_derivative_wrt_translation_rate,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,np.transpose(delayed_jacobian_derivative_wrt_translation_rate)) +
instant_noise_derivative_wrt_translation_rate + delayed_noise_derivative_wrt_translation_rate )
next_covariance_derivative_matrix[5] = current_covariance_derivative_matrix[5] + discretisation_time_step*(derivative_of_variance_wrt_translation_rate)
# transcriptional delay
# instant_jacobian_derivative_wrt_transcription_delay = 0
delayed_jacobian_derivative_wrt_transcription_delay = np.array([[0.0,basal_transcription_rate*hill_function_second_derivative_value*past_mean_derivative[6,1]],[0.0,0.0]])
instant_noise_derivative_wrt_transcription_delay = (np.array([[mRNA_degradation_rate*current_mean_derivative[6,0],0.0],
[0.0,translation_rate*current_mean_derivative[6,0] + protein_degradation_rate*current_mean_derivative[6,1]]]))
delayed_noise_derivative_wrt_transcription_delay = np.array([[basal_transcription_rate*hill_function_derivative_value*past_mean_derivative[6,1],0.0],
[0.0,0.0]])
derivative_of_variance_wrt_transcription_delay = ( common_state_space_variance_derivative_element[6] +
np.dot(delayed_jacobian_derivative_wrt_transcription_delay,covariance_matrix_past_to_now) +
np.dot(covariance_matrix_now_to_past,np.transpose(delayed_jacobian_derivative_wrt_transcription_delay)) +
instant_noise_derivative_wrt_transcription_delay + delayed_noise_derivative_wrt_transcription_delay )
next_covariance_derivative_matrix[6] = current_covariance_derivative_matrix[6] + discretisation_time_step*(derivative_of_variance_wrt_transcription_delay)
# in the next lines we use explicit for loops instead of np.ix_-like indexing, for numba compatibility
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
for short_column_index, long_column_index in enumerate([next_time_index,
total_number_of_states+next_time_index]):
state_space_variance_derivative[parameter_index,long_row_index,long_column_index] = next_covariance_derivative_matrix[parameter_index,
short_row_index,
short_column_index]
## now we need to update the cross correlations, d_P(s,t)/d_theta in the Calderazzo paper
# the range needs to include t, since we want to propagate d_P(t,t)/d_theta into d_P(t,t+Deltat)/d_theta
for intermediate_time_index in range(past_time_index,current_time_index+1):
# This corresponds to d_P(s,t)/d_theta in the Calderazzo paper
# for loops instead of np.ix_-like indexing
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([current_time_index,
total_number_of_states+current_time_index]):
covariance_matrix_derivative_intermediate_to_current[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# This corresponds to d_P(s,t-tau)/d_theta
for parameter_index in range(7):
for short_row_index, long_row_index in enumerate([intermediate_time_index,
total_number_of_states+intermediate_time_index]):
for short_column_index, long_column_index in enumerate([past_time_index,
total_number_of_states+past_time_index]):
covariance_matrix_derivative_intermediate_to_past[parameter_index,short_row_index,short_column_index] = state_space_variance_derivative[parameter_index,
long_row_index,
long_column_index]
# Again, this derivative is slightly different for each parameter, which makes it difficult to
# code this part with a loop. For each parameter we split the derivative into its constituent parts.
# The part common to every derivative is defined here as common_intermediate_state_space_variance_derivative_element
for parameter_index in range(7):
common_intermediate_state_space_variance_derivative_element[parameter_index] = ( np.dot(covariance_matrix_derivative_intermediate_to_current[parameter_index],
instant_jacobian_transpose) +
np.dot(covariance_matrix_derivative_intermediate_to_past[parameter_index],
delayed_jacobian_transpose) )
# repression threshold
derivative_of_intermediate_variance_wrt_repression_threshold = ( common_intermediate_state_space_variance_derivative_element[0] +
np.dot(covariance_matrix_intermediate_to_past,delayed_jacobian_derivative_wrt_repression_transpose) )
covariance_matrix_derivative_intermediate_to_next[0] = covariance_matrix_derivative_intermediate_to_current[0] + discretisation_time_step*(derivative_of_intermediate_variance_wrt_repression_threshold)
# hill coefficient
derivative_of_intermediate_variance_wrt_hill_coefficient = ( common_intermediate_state_space_variance_derivative_element[1] +
np.dot(covariance_matrix_intermediate_to_past,np.transpose(delayed_jacobian_derivative_wrt_hill_coefficient)))
covariance_matrix_derivative_intermediate_to_next[1] = covariance_matrix_derivative_intermediate_to_current[1] + discretisation_time_step*(derivative_of_intermediate_variance_wrt_hill_coefficient)
# mRNA degradation rate
derivative_of_intermediate_variance_wrt_mRNA_degradation = ( common_intermediate_state_space_variance_derivative_element[2] +
np.dot(covariance_matrix_intermediate_to_current,np.transpose(instant_jacobian_derivative_wrt_mRNA_degradation)) +
np.dot(covariance_matrix_intermediate_to_past,np.transpose(delayed_jacobian_derivative_wrt_mRNA_degradation)) )
covariance_matrix_derivative_intermediate_to_next[2] = covariance_matrix_derivative_intermediate_to_current[2] + discretisation_time_step*(derivative_of_intermediate_variance_wrt_mRNA_degradation)
# protein degradation rate
derivative_of_intermediate_variance_wrt_protein_degradation = ( common_intermediate_state_space_variance_derivative_element[3] +
np.dot(covariance_matrix_intermediate_to_current,np.transpose(instant_jacobian_derivative_wrt_protein_degradation)) +
np.dot(covariance_matrix_intermediate_to_past,np.transpose(delayed_jacobian_derivative_wrt_protein_degradation)) )
covariance_matrix_derivative_intermediate_to_next[3] = covariance_matrix_derivative_intermediate_to_current[3] + discretisation_time_step*(derivative_of_intermediate_variance_wrt_protein_degradation)
# basal transcription rate
derivative_of_intermediate_variance_wrt_basal_transcription = ( common_intermediate_state_space_variance_derivative_element[4] +
np.dot(covariance_matrix_intermediate_to_past,np.transpose(delayed_jacobian_derivative_wrt_basal_transcription)) )
covariance_matrix_derivative_intermediate_to_next[4] = covariance_matrix_derivative_intermediate_to_current[4] + discretisation_time_step*(derivative_of_intermediate_variance_wrt_basal_transcription)
# translation rate
derivative_of_intermediate_variance_wrt_translation_rate = ( common_intermediate_state_space_variance_derivative_element[5] +
np.dot(covariance_matrix_intermediate_to_current,np.transpose(instant_jacobian_derivative_wrt_translation_rate)) +
np.dot(covariance_matrix_intermediate_to_past,np.transpose(delayed_jacobian_derivative_wrt_translation_rate)) )
"""
General utilities for accessing pickled data files and for simple transformations.
Data is generally stored as a dict of arrays, dumped and compressed to a single file.
Using pytables would probably be the best approach, but it can cause portability problems
(or make installation harder in some environments),
so here we use a simpler approach based on the more standard zlib and pickle packages.
"""
import gzip
from io import BufferedReader
import io
import os
import pickle
import zlib
import logging
import numpy as np
from config import DATA_DIR, PUBLISH_DIR, MEAN_MARKERS, BED_TO_BIG_BED, BED_GRAPH_TO_BIG_WIG, CHROM_SIZES
import re
__author__ = 'eran'
def wig_transform(wig_file, smoothing=100, output=True):
"""
Gets a wig file and transforms it to a dictionary (chr1 -> [scores], chr2 -> [scores]...)
and saves it with pickle
@param output: whether to write the transformed wig to an npz file (name.smoothing.npz)
@param smoothing: bin size for smoothing (raw data is commonly 20bp)
@param wig_file: name of wig file (or wig.gz)
@return: a dictionary where keys are chromosomes and values are scores
@note it is recommended to use bed graph files with load_bg, which works much faster
"""
written_dict = dict()  # keys: chromosome names, values: lists of binned scores
if wig_file.endswith('.gz'):
in_stream = BufferedReader(gzip.open(wig_file, 'rb'))
in_file = (in_line.decode('ascii') for in_line in in_stream)
else:
in_stream = io.open(wig_file, 'r')
in_file = in_stream  # plain-text wig: iterate over the stream directly
prev_data = []
l_prev_data = 0
chrm_score = []
chrom = None
rlines = 0
frag_size = 20
cur_smoothing = smoothing // frag_size
prev_pos = 0
for r in in_file:
if r[0] == 'v': # new chromosome, variable step
if chrom is not None:
chrm_score.append(sum(prev_data))
written_dict[chrom] = chrm_score
chrm_score = []
prev_data = []
print(r.strip())
chrom_track = r.rstrip().split(' ')
frag_size = int(chrom_track[2].split('=')[1].strip())
cur_smoothing = int(smoothing / frag_size)
chrom = chrom_track[1].split('=')[1]
prev_pos = 1
pos, score = next(in_file).split('\t')
elif r[0] == 't':
continue # track line
else:
pos, score = r.split('\t')
pos = int(pos)
score = int(score)
if pos != prev_pos:
prev_data += [0] * int(np.floor((pos - prev_pos) / frag_size))
l_prev_data = len(prev_data)
prev_data += [score]
l_prev_data += 1
prev_pos = pos + frag_size
if l_prev_data > cur_smoothing:
for smooth_bin in zip(*[iter(prev_data)] * cur_smoothing):
chrm_score.append(sum(smooth_bin))
remaining = (len(prev_data) % cur_smoothing)
prev_data = prev_data[-remaining:] if remaining > 0 else []
l_prev_data = len(prev_data)
rlines += 1
if len(prev_data) != 0:
chrm_score.append(sum(prev_data))
written_dict[chrom] = chrm_score
in_stream.close()
print('Finished reading and down-sampling')
if output:
# save to same file but with different extensions
wig_file = wig_file.replace('.gz', '') # remove .gz extension for compressed file
output_filename = wig_file.replace('.wig', '.%i.npz' % smoothing)
save_result_dict(output_filename, written_dict)
#with open(output_filename, 'wb') as output:
# output.write(zlib.compress(pickle.dumps(written_dict, pickle.HIGHEST_PROTOCOL)))
print('Finished writing file')
return written_dict
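# A minimal sketch of the save/load helpers referenced above, assuming the zlib + pickle
# layout described in the module docstring and in the commented-out code; the real
# save_result_dict may be defined elsewhere in this module and could differ.
def save_result_dict_sketch(filename, result_dict):
# compress the pickled dictionary and write it in one shot
with open(filename, 'wb') as out_fd:
out_fd.write(zlib.compress(pickle.dumps(result_dict, pickle.HIGHEST_PROTOCOL)))
def load_result_dict_sketch(filename):
# inverse operation: read, decompress and unpickle
with open(filename, 'rb') as in_fd:
return pickle.loads(zlib.decompress(in_fd.read()))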
def chrom_sizes(bin_sizes=20):
"""
Get chromosome sizes of hg19 as dictionary
@rtype : dict
@return: dictionary with key as chromosome name and value as size (for bins of size of 20)
"""
with open(CHROM_SIZES) as chrom_size_fd:
chromosomes = (r.split('\t') for r in chrom_size_fd.readlines())
chrom_dict = dict(((chrom[0], int(chrom[1]) // bin_sizes) for chrom in chromosomes))
return chrom_dict
def load_bg(bg_file):
"""
Loads bed graph
should be faster than the above and loads the data directly
@param bg_file: bg file to be loaded
@return: dictionary of chromosomes keys and values as wig like arrays
"""
def load_bg_slow():
"""
Fallback - if pandas doesn't exist or you run into trouble with cython compilation
"""
chr_to_ind = dict()
chr_to_ind_inv = dict()
for i in range(1, 26):
chr_name = 'chr%i' % i
if i == 23:
chr_name = 'chrX'
if i == 24:
chr_name = 'chrY'
if i == 25:
chr_name = 'chrM'
chr_name = chr_name.encode('ascii')
chr_to_ind[chr_name] = i
for k, v in chr_to_ind.items():
chr_to_ind_inv[v] = k.decode('utf-8')
# 0 - chromosome, 1 - start, 2 - end
bed_matrix = np.loadtxt(bg_file, delimiter='\t', usecols=(0, 1, 2, 3),
converters={
0: lambda x: chr_to_ind[x]
}, dtype=int)
chromosomes_ind = set(bed_matrix[:, 0])
res_dict = dict()
for chromosome in chromosomes_ind:
selector = bed_matrix[:, 0] == chromosome
chrom_matrix = np.array(bed_matrix[selector][:, [1, 2, 3]])
# TO BIN SIZE
chrom_matrix[:, [0, 1]] = chrom_matrix[:, [0, 1]] // 20
last_end = max(chrom_matrix[:, 1])
long_rep = np.zeros(last_end)
# coding=utf8
"""
Graph-Based Semi-Supervised Learning (GBSSL) implementation.
"""
# Authors: <NAME> <<EMAIL>>
# Lisence: MIT
import numpy as np
from scipy import sparse
from abc import ABCMeta, abstractmethod
from sklearn.base import BaseEstimator, ClassifierMixin
class Base(BaseEstimator, ClassifierMixin):
__metaclass__ = ABCMeta
def __init__(self,graph,max_iter=30):
self.max_iter = max_iter
self.graph = graph
@abstractmethod
def _build_propagation_matrix(self):
raise NotImplementedError("Propagation matrix construction must be implemented to fit a model.")
@abstractmethod
def _build_base_matrix(self):
raise NotImplementedError("Base matrix construction must be implemented to fit a model.")
def _init_label_matrix(self):
n_samples = self.graph.shape[0]
n_classes = self.y_.max()+1
return np.zeros((n_samples,n_classes))
def _arrange_params(self):
"""Do nothing by default"""
pass
def fit(self,x,y):
"""Fit a graph-based semi-supervised learning model
All the input data is provided array X (labeled samples only)
and corresponding label array y.
Parameters
----------
x : array_like, shape = [n_labeled_samples]
Node IDs of labeled samples
y : array_like, shape = [n_labeled_samples]
Label IDs of labeled samples
Returns
-------
self : returns an instance of self.
"""
self.x_ = x
self.y_ = y
self._arrange_params()
self.F_ = self._init_label_matrix()
self.P_ = self._build_propagation_matrix()
self.B_ = self._build_base_matrix()
remaining_iter = self.max_iter
while remaining_iter > 0:
self.F_ = self._propagate()
remaining_iter -= 1
return self
def _propagate(self):
return self.P_.dot(self.F_) + self.B_
def predict(self,x):
"""Performs prediction based on the fitted model
Parameters
----------
x : array_like, shape = [n_samples]
Node IDs
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input node IDs
"""
probas = self.predict_proba(x)
return np.argmax(probas,axis=1)
def predict_proba(self,x):
"""Predict probability for each possible label
Parameters
----------
x : array_like, shape = [n_samples]
Node IDs
Returns
-------
probabilities : array_like, shape = [n_samples, n_classes]
Probability distributions across class labels
"""
z = np.sum(self.F_[x], axis=1)
z[z==0] += 1 # Avoid division by 0
return (self.F_[x].T / z).T
class LGC(Base):
"""Local and Global Consistency (LGC) for GBSSL
Parameters
----------
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
Attributes
----------
x_ : array, shape = [n_samples]
Input array of node IDs.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2004).
Learning with local and global consistency.
Advances in neural information processing systems, 16(16), 321-328.
"""
def __init__(self,graph=None,alpha=0.99,max_iter=30):
super(LGC, self).__init__(graph,max_iter=max_iter)
self.alpha=alpha
def _build_propagation_matrix(self):
""" LGC computes the normalized Laplacian as its propagation matrix"""
degrees = self.graph.sum(axis=0).A[0]
degrees[degrees==0] += 1 # Avoid division by 0
D2 = np.sqrt(sparse.diags((1.0/degrees),offsets=0))
S = D2.dot(self.graph).dot(D2)
return self.alpha*S
def _build_base_matrix(self):
n_samples = self.graph.shape[0]
n_classes = self.y_.max()+1
B = np.zeros((n_samples,n_classes))
B[self.x_,self.y_] = 1
return (1-self.alpha)*B
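# Hedged usage sketch (not part of the original module); the names below are illustrative only.
# Fit LGC on a tiny 3-node chain graph with one labelled node per class and predict the middle node.
# from scipy import sparse
# A_demo = sparse.csr_matrix(np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]]))
# model_demo = LGC(graph=A_demo, alpha=0.99, max_iter=30)
# model_demo.fit(x=np.array([0, 2]), y=np.array([0, 1]))
# print(model_demo.predict(np.array([1])))  # label propagated to the unlabelled middle node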
class HMN(Base):
"""Harmonic funcsion (HMN) for GBSSL
Parameters
----------
max_iter : float
maximum number of iterations allowed
Attributes
----------
x_ : array, shape = [n_samples]
Input array of node IDs.
References
----------
<NAME>., <NAME>., & <NAME>. (2003, August).
Semi-supervised learning using gaussian fields and harmonic functions.
In ICML (Vol. 3, pp. 912-919).
"""
def __init__(self,graph=None,max_iter=30):
super(HMN, self).__init__(graph,max_iter=max_iter)
def _build_propagation_matrix(self):
degrees = self.graph.sum(axis=0).A[0]
degrees[degrees==0] += 1 # Avoid division by 0
D = sparse.diags((1.0/degrees),offsets=0)
P = D.dot(self.graph).tolil()
P[self.x_] = 0
return P.tocsr()
def _build_base_matrix(self):
n_samples = self.graph.shape[0]
n_classes = self.y_.max()+1
B = np.zeros((n_samples,n_classes))
B[self.x_,self.y_] = 1
return B
class PARW(Base):
"""Partially Absorbing Random Walk (PARW) for GBSSL
Parameters
----------
lamb: float (default=1.0)
Absorbing parameter
max_iter : float
maximum number of iterations allowed
Attributes
----------
x_ : array, shape = [n_samples]
Input array of node IDs.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012).
Learning with partially absorbing random walks.
In Advances in Neural Information Processing Systems (pp. 3077-3085).
"""
def __init__(self,graph=None,lamb=1.0,max_iter=30):
super(PARW, self).__init__(graph,max_iter=max_iter)
self.lamb=lamb
def _build_propagation_matrix(self):
d = self.graph.sum(axis=1).T.A[0]
Z = sparse.diags(1.0 / (d+self.lamb), offsets=0)
P = Z.dot(self.graph)
return P
def _build_base_matrix(self):
n_samples = self.graph.shape[0]
n_classes = self.y_.max()+1
B = np.zeros((n_samples,n_classes))
B[self.x_,self.y_] = 1
d = np.array(self.graph.sum(1).T)[0]
Z = sparse.diags(1.0 / (d+self.lamb), offsets=0)
Lamb = sparse.diags(self.lamb,shape=(n_samples,n_samples), offsets=0)
return Z.dot(Lamb).dot(B)
class OMNI(Base):
"""OMNI-Prop for GBSSL
Parameters
----------
lamb : float > 0 (default = 1.0)
Define importance between prior and evidence from neighbors
max_iter : float
maximum number of iterations allowed
Attributes
----------
x_ : array, shape = [n_samples]
Input array of node IDs.
References
----------
<NAME>., <NAME>., & <NAME>. (2015, February).
OMNI-Prop: Seamless Node Classification on Arbitrary Label Correlation.
In Twenty-Ninth AAAI Conference on Artificial Intelligence.
"""
def __init__(self,graph=None,lamb=1.0,max_iter=30):
super(OMNI,self).__init__(graph,max_iter)
self.lamb = lamb
def _build_propagation_matrix(self):
d = self.graph.sum(axis=0).A[0]
dT = self.graph.sum(axis=1).T.A[0]
Q = (sparse.diags(1.0/(d+self.lamb), offsets=0).dot(self.graph)).dot(sparse.diags(1.0/(dT+self.lamb),offsets=0).dot(self.graph.T)).tolil()
Q[self.x_] = 0
return Q
def _build_base_matrix(self):
n_samples = self.graph.shape[0]
n_classes = self.y_.max()+1
unlabeled = np.setdiff1d(np.arange(n_samples),self.x_)
dU = self.graph[unlabeled].sum(axis=1).T.A[0]
dT = self.graph.sum(axis=0).A[0]
n_samples = self.graph.shape[0]
r = sparse.diags(1.0/(dU+self.lamb),offsets=0).dot(self.lamb*self.graph[unlabeled].dot(sparse.diags(1.0/(dT+self.lamb),offsets=0)).dot(np.ones(n_samples))+self.lamb)
b = np.ones(n_classes) / float(n_classes)
B = np.zeros((n_samples,n_classes))
B[unlabeled] = np.outer(r,b)
B[self.x_,self.y_] = 1
return B
class CAMLP(Base):
"""Confidence-Aware Modulated Label Propagation (CAMLP) for GBSSL
Parameters
----------
beta : float > 0 (default = 0.1)
Define importance between prior and evidence from neighbors
H : array_like, shape = [n_classes, n_classes]
Define affinities between labels
if None, identity matrix is set
max_iter : float
maximum number of iterations allowed
Attributes
----------
x_ : array, shape = [n_samples]
Input array of node IDs.
References
----------
<NAME>., <NAME>., & <NAME>. (2016, May).
CAMLP: Confidence-Aware Modulated Label Propagation.
In SIAM International Conference on Data Mining.
"""
def __init__(self,graph=None,beta=0.1,H=None,max_iter=30):
super(CAMLP,self).__init__(graph,max_iter)
self.beta=beta
self.H=H
def _arrange_params(self):
if self.H is None:
n_classes = self.y_.max()+1
self.H = np.identity(n_classes)
# -*- coding: utf-8 -*-
# Original file: https://github.com/bolunwang/backdoor
# Original Author: <NAME> (<EMAIL>)
# Original License: MIT
# Adapted to evaluate pytorch models
import os
import time
import argparse
import numpy as np
import random
import torch
from tensorboard.backend.event_processing import event_accumulator
from visualizer_pytorch import Visualizer_p
import utils_backdoor_pytorch
import sys
sys.path.append("..")
from precision_utils import trans_state_dict_test
from precision_utils import trans_state_dict_pruning_test
parser = argparse.ArgumentParser(description='Neural Cleanse Testing')
# dataset and model
parser.add_argument('--dataset', type=str, default='zzz',
help='dataset to use')
parser.add_argument('--model-dir', type=str, default='../checkpoint/',
help='where to read the model')
parser.add_argument('--ckpt-name', type=str, default='str',
help='where to read the model')
parser.add_argument('--network-arch', type=str, default='zzz',
help='model arch')
parser.add_argument('--pruning', action='store_true')
parser.add_argument('--quantization', action='store_true')
parser.add_argument('--margin', default=0.5, type=float, help='for quantization')
parser.add_argument('--epoch', default=35, type=int, help='for quantization')
parser.add_argument('--random-seed', type=int, default=0,
help='random seed')
args = parser.parse_args()
random_seed = args.random_seed
print('random seed:', random_seed)
torch.manual_seed(random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
MODEL_DIR = args.model_dir
NETWORK_ARCH = args.network_arch
RESULT_DIR = args.ckpt_name
RESULT_DIR = RESULT_DIR.replace('/', '_')
RESULT_DIR = './Neural_Cleanse/' + RESULT_DIR
IMG_FILENAME_TEMPLATE = args.dataset + '_visualize_%s_label_%d_v%d.png'
# input size
IMG_ROWS = 32
IMG_COLS = 32
IMG_COLOR = 3
INPUT_SHAPE = (IMG_COLOR, IMG_ROWS, IMG_COLS)
if args.dataset == 'cifar10':
NUM_CLASSES = 10
elif args.dataset == 'gtsrb':
NUM_CLASSES = 43
else:
raise ValueError('Dataset currently unsupported!')
Y_TARGET = 0
INTENSITY_RANGE = args.dataset
BATCH_SIZE = 32 # batch size used for optimization
LR = 0.1 # learning rate
STEPS = 1000 # total optimization iterations
NB_SAMPLE = 1000 # number of samples in each mini batch
MINI_BATCH = NB_SAMPLE // BATCH_SIZE # mini batch size used for early stop
INIT_COST = 1e-3 # initial weight used for balancing two objectives
REGULARIZATION = 'l1' # reg term to control the mask's norm
ATTACK_SUCC_THRESHOLD = 0.98 # attack success threshold of the reversed attack
PATIENCE = 6 # patience for adjusting weight, number of mini batches
COST_MULTIPLIER = 2 # multiplier for auto-control of weight (COST)
SAVE_LAST = False # whether to save the last result or best result
EARLY_STOP = True # whether to early stop
EARLY_STOP_THRESHOLD = 1.0 # loss threshold for early stop
EARLY_STOP_PATIENCE = 5 * PATIENCE # patience for early stop
# the following part is not used in our experiments,
# but our implementation also supports a super-pixel mask
UPSAMPLE_SIZE = 1 # size of the super pixel
MASK_SHAPE = np.ceil(np.array(INPUT_SHAPE[1:3], dtype=float) / UPSAMPLE_SIZE)
MASK_SHAPE = MASK_SHAPE.astype(int)
print('dataset', INTENSITY_RANGE)
print('Result dir', RESULT_DIR)
def find_epoch(name, max_epoch=69, margin=0.5):
name = '../train_quantization_attack/tensorboard/' + name
ea = event_accumulator.EventAccumulator(name)
ea.Reload()
pt_ = ea.scalars.Items('INT8/RACC/PT')
tpt_ = ea.scalars.Items('INT8/RTACC/PT')
asr_ = ea.scalars.Items('Float32/TACC')
pt = np.array([i.value for i in pt_])[:max_epoch]
tpt = np.array([i.value for i in tpt_])[:max_epoch]
asr = np.array([i.value for i in asr_])[:max_epoch]
pt_max = np.max(pt)
tpt[pt + margin < pt_max] = 0
if args.dataset == 'cifar10':
thres_ = 20
elif args.dataset == 'gtsrb':
thres_ = 10
tpt[asr > thres_] = 0
tmp = np.zeros(max_epoch)
for i in range(max_epoch):
tmp[i] = tpt[i]
i = np.argmax(tmp)
print('max values:', pt_max)
print('epoch:', i, 'values:', pt[i], tpt[i])
return i
if args.quantization:
epoch = find_epoch(args.ckpt_name, max_epoch=args.epoch, margin=args.margin)
MODEL_FILENAME = '%s/%dper_tensor_quantnet.pth' % (args.ckpt_name, epoch)
else:
MODEL_FILENAME = args.ckpt_name
print('model name', MODEL_FILENAME)
def visualize_trigger_w_mask(visualizer, gen, y_target,
save_version=0, save_pattern_flag=True):
visualize_start_time = time.time()
# initialize with random mask
pattern = np.random.random(INPUT_SHAPE).astype('f') * 255.0
mask = np.random.random(MASK_SHAPE).astype('f')
# execute reverse engineering
pattern, mask, mask_upsample, logs = visualizer.visualize(
gen=gen, y_target=y_target, pattern_init=pattern, mask_init=mask)
# meta data about the generated mask
print('pattern, shape: %s, min: %f, max: %f' %
(str(pattern.shape), np.min(pattern), np.max(pattern)))
""" functions to sort, compute stats, and compare arrays of eigenmodes"""
import pandas as pd
import numpy as np
from scipy.spatial import distance
from scipy.stats import entropy, spearmanr, pearsonr
def eig_fc_get_standardz(x, y, binary_thresh=0.1, nreps=1000):
"""Permutes both canonical networks and input eigenmodes 1000 times
and calculates the standardized overlap score between each eigen mode
and each canonical network, standardize by mean and standard deviation
Args:
x (array): [eigenmode vector]
y (array): [canonical functional network vector]
binary_thresh (int, optional): Defaults to 0. threshold for binarizing regional values
binarize (bool, optional): Defaults to False. Do you want to binarize or not
Returns:
zxy [array]: standardized overlapscores
sxy [array]: non-standardized overlap scores
"""
ub, lb = 1, 0 # 1 or 0s after thresholding
x = np.where(x > binary_thresh, ub, lb)
y = np.where(y > binary_thresh, ub, lb)
np.random.seed(24)
mean_std_perm = np.zeros(nreps)
# std_perm = np.zeros(1000)
for i in np.arange(nreps):
xperm = np.random.permutation(x)
yperm = np.random.permutation(y)
mean_std_perm[i] = get_sxy(xperm, yperm)
sxy = get_sxy(x, y)
# compute mean and standard deviation:
zxy = (sxy - np.mean(mean_std_perm)) / np.std(mean_std_perm)
return zxy, sxy
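# Illustrative example (assumption, not from the original file): compare a toy eigenmode
# against a toy canonical network with a reduced permutation count.
# x_demo = np.array([0.9, 0.2, 0.05, 0.8])
# y_demo = np.array([1.0, 0.0, 0.0, 1.0])
# z_demo, s_demo = eig_fc_get_standardz(x_demo, y_demo, binary_thresh=0.1, nreps=100)
# print(z_demo, s_demo)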
def get_overlap_score_dfs(all_eig, all_fc_networks, threshold=0.1):
"""[summary]
Args:
all_eig (array): eigen modes that you want to compare
all_fc_networks (array): all canonical networks being compared to
Returns:
df_overlap_score [Pandas DataFrame]: DataFrame with all overlap scores
df_sxy [Pandas DataFrame]: DataFrame with the non-standardized overlap scores
"""
df_cols = all_fc_networks.index
df_ind = ["Eig #%d" % x for x in np.arange(all_eig.shape[1]) + 1]
df_overlap_score = pd.DataFrame([], index=df_ind, columns=df_cols)
df_sxy = pd.DataFrame([], index=df_ind, columns=df_cols)
eigcounter = 0
for eignum in df_sxy.index:
for name in all_fc_networks.index:
fc_vector = all_fc_networks.loc[name].values
eig_vector = all_eig[:, eigcounter]
df_overlap_score.at[eignum, name], df_sxy.at[
eignum, name
] = eig_fc_get_standardz(eig_vector, fc_vector, binary_thresh=threshold)
eigcounter += 1
return df_overlap_score, df_sxy
def get_sxy(x, y):
"""Get s(x,y) where
Inputs:
x - module/eigen mode
y - canonical network
"""
ind_eig = np.where(x > 0.1)
ind_fc = np.where(y > 0.1)
# find intersection and union
fc_eig_intersect = np.intersect1d(ind_eig, ind_fc)
fc_eig_union = np.union1d(ind_eig, ind_fc)
#!/usr/bin/env python
# coding: utf-8
# In[491]:
import matplotlib.pyplot as plt
# ^^^ pyforest auto-imports - don't write above this line
import numpy as np
import pathlib
from skimage import color
import random
# In[253]:
defaultPath = r'G:\Documents\ReferenceBooks\1-DigitalImageProcessing\HW3\Images\\'
# ### Convert image to FFT
# In[362]:
class GrayImageFFT:
def load_gray_from_file(self,defaultPath,fileName):
self.image = plt.imread(defaultPath+fileName)
self.image = color.rgb2gray(self.image)
def load_from_array(self,array):
self.image = array
def fft(self):
self.fft = np.fft.fft2(self.image)
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
pgf_with_custom_preamble = {
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
# "pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.preamble": [
"\\usepackage{unicode-math,amsmath,amssymb,amsthm}", # unicode math setup
]
}
mpl.rcParams.update(pgf_with_custom_preamble)
import matplotlib.pyplot as plt
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
def makeup_for_plot(fig1):
fig1.spines["top"].set_visible(False)
fig1.spines["bottom"].set_visible(True)
fig1.spines["right"].set_visible(False)
fig1.spines["left"].set_visible(True)
fig1.get_xaxis().tick_bottom()
fig1.get_yaxis().tick_left()
fig1.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on",labelsize=12)
grid_color = '#e3e3e3'
grid_line_style= '--'
fig1.grid(linestyle=grid_line_style,color=grid_color)
return fig1
def do_tight_layout_for_fig(fig):
fig.tight_layout()
return fig
lr_vals = [0.1]
colors = ['red','green','c','m','y','orange','green','c','m','y','black','brown','orange','blue', 'black','blue','brown','red','orange','green','c','m','y','orange','green','c','m','y']
import argparse
parser = argparse.ArgumentParser(description='Plot Experiments')
parser.add_argument('--fun_num', '--fun_num', default=0,type=int, dest='fun_num')
args = parser.parse_args()
fun_num = args.fun_num
my_markers = ['','','','','','','']
if fun_num == 0:
files = {
1: 'results/cocain_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
2: 'results/bpg_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
3: 'results/palm_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_beta_0.0_lam_val_0.1.txt',
4: 'results/palm_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_beta_0.2_lam_val_0.1.txt',
5: 'results/palm_mf_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_beta_0.4_lam_val_0.1.txt',
6: 'results/bpg_mf_wb_movielens_fun_name_1_dataset_option_1_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
}
if fun_num == 1:
files = {
1: 'results/cocain_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
2: 'results/bpg_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
3: 'results/palm_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_beta_0.0_lam_val_0.1.txt',
4: 'results/palm_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_beta_0.2_lam_val_0.1.txt',
5: 'results/palm_mf_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_beta_0.4_lam_val_0.1.txt',
6: 'results/bpg_mf_wb_movielens_fun_name_1_dataset_option_2_abs_fun_num_1_breg_num_1_lam_val_0.1.txt'
}
if fun_num == 2:
files = {
1: 'results/cocain_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
2: 'results/bpg_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_lam_val_0.1.txt',
3: 'results/palm_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_beta_0.0_lam_val_0.1.txt',
4: 'results/palm_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_beta_0.2_lam_val_0.1.txt',
5: 'results/palm_mf_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_beta_0.4_lam_val_0.1.txt',
6: 'results/bpg_mf_wb_movielens_fun_name_1_dataset_option_3_abs_fun_num_1_breg_num_1_lam_val_0.1.txt'
}
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1 = makeup_for_plot(ax1)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2 = makeup_for_plot(ax2)
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3 = makeup_for_plot(ax3)
fig4 = plt.figure()
ax4 = fig4.add_subplot(111)
ax4 = makeup_for_plot(ax4)
fig5 = plt.figure()
ax5 = fig5.add_subplot(111)
ax5 = makeup_for_plot(ax5)
label_font_size = 13
legend_font_size = 17
my_line_width = 2
labels_dict = {
1: r"CoCaIn BPG-MF",
6: r"BPG-MF-WB",
2: r"BPG-MF",
3: r"PALM",
4: r"iPALM ($\beta = 0.2$)",
5: r"iPALM ($\beta = 0.4$)",
}
nb_epoch = 1000
opt_vals= np.array([1,6,2,3,4,5])
color_count = 0
f_opt = 0
min_fun_val = np.inf
for i in opt_vals:
file_name = files[i]
try:
best_train_objective_vals = np.loadtxt(file_name)[:,0]
min_fun_val = np.nanmin([min_fun_val,np.min(best_train_objective_vals)])
print(min_fun_val)
except:
pass
for i in opt_vals:
file_name = files[i]
print(file_name)
try:
best_train_objective_vals = np.loadtxt(file_name)[:,0]
best_time_vals = np.loadtxt(file_name)[:,1]
except:
best_train_objective_vals = np.loadtxt(file_name)
# plausible reconstruction (assumption): plot the objective gap against the epoch index on log-log axes
num_points = min(nb_epoch, len(best_train_objective_vals))
ax4.loglog(np.arange(num_points)+1, best_train_objective_vals[:num_points]-min_fun_val, color=colors[color_count], linewidth=my_line_width, label=labels_dict[i])
import numpy as np
import tensorflow as tf
class NeuralBandit:
def __init__(self, nPicos, ABSval, CREval, initExploration, epsilon_0, batch_size=1):
nActivePicosVal = np.arange(0, (nPicos+1))
self.controlSpace = np.array(np.meshgrid(nActivePicosVal, ABSval, CREval)).T.reshape(-1, 3)
self.nControls = len(self.controlSpace[:, 0])
# self.nControls = 10
# Network Parameters
n_hidden_1 = 20 # 1st layer number of features
n_hidden_2 = 20 # 2nd layer number of features
n_input = 2 # data input
n_output = 1 # function output
learning_rate = 0.001
self.batch_size = batch_size
# self.batch_count = np.zeros((self.nControls))
# self.batch_buffer = np.zeros((self.nControls, self.batch_size))
self.count = np.zeros((self.nControls))
self.current_cost = np.zeros((self.nControls))
self.initExploration = initExploration
self.epsilon_0 = epsilon_0
self.neuralArms = list()
self.armCost = list()
self.armOptimizer = list()
self.x = tf.placeholder("float", [None, n_input])
self.y = tf.placeholder("float", [None, n_output])
def multilayer_perceptron(x, weights, biases):
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
for i in range(self.nControls):
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='h1_'+str(i)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='h2_'+str(i)),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_output]), name='hout_'+str(i))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1_'+str(i)),
'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2_'+str(i)),
'out': tf.Variable(tf.random_normal([n_output]), name='bout_'+str(i))
}
pred = multilayer_perceptron(self.x, weights, biases)
cost = tf.reduce_sum(tf.pow(pred - self.y, 2)) / self.batch_size
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
self.neuralArms.append(pred)
self.armCost.append(cost)
self.armOptimizer.append(optimizer)
if np.mod(i, 20) == 0:
print('NeuralBandit: Created NN number ' + str(i) + ' of '+str(self.nControls))
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
self.algConf = {'epsilon_0': epsilon_0,
'initExploration': initExploration,
'batch_size': batch_size,
'n_hidden_1': n_hidden_1,
'n_hidden_2': n_hidden_2,
'learning_rate': learning_rate,
'Common_layer': 'no'}
def getControl(self, inputData):
x = inputData['state']
indexes = np.where(self.count < self.initExploration)
"""
@ author: Alex
@ E-mail: <EMAIL>
@ Introduction: Simple application of PCA algorithm
"""
import numpy as np
def PCA(X,k):
"""
@ param X : data set
@ param k : the components you want
@ return data : k dimension of features
"""
# step 1: mean of each feature
samples_number, features_number = X.shape
mean = np.array([np.mean(X[:,i]) for i in range(features_number)])
normalization_X=X-mean
# step 2: find the scatter matrix
# the scatter matrix is proportional to the covariance matrix: scatter_matrix = covariance_matrix * (n_samples - 1)
scatter_matrix = np.dot(np.transpose(normalization_X), normalization_X)
# step 3: Calculate the eigenvectors and eigenvalues
eig_values, eig_vectors = np.linalg.eig(scatter_matrix)
# step 4: sort eigenvectors by eigenvalue (descending) and keep the top k
eig_pairs = sorted(zip(eig_values, eig_vectors.T), key=lambda pair: pair[0], reverse=True)
feature = np.array([pair[1] for pair in eig_pairs[:k]])
# step 5: project the normalized data onto the k principal components
data = np.dot(normalization_X, np.transpose(feature))
return data
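# Hedged usage sketch (not in the original script): project 4-dimensional random data
# onto its first two principal components; names are illustrative only.
# if __name__ == '__main__':
#     X_demo = np.random.randn(100, 4)
#     reduced = PCA(X_demo, k=2)
#     print(reduced.shape)  # expected: (100, 2)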
from __future__ import division, print_function, absolute_import
from .core import SeqletCoordinates
from modisco import util
import numpy as np
from collections import defaultdict, Counter
import itertools
from sklearn.neighbors.kde import KernelDensity
import sys
import time
from .value_provider import (
AbstractValTransformer, AbsPercentileValTransformer,
SignedPercentileValTransformer)
import scipy
class TransformAndThresholdResults(object):
def __init__(self, neg_threshold,
transformed_neg_threshold,
pos_threshold,
transformed_pos_threshold,
val_transformer):
#both 'transformed_neg_threshold' and 'transformed_pos_threshold'
# should be positive, i.e. they should be relative to the
# transformed distribution used to set the threshold, e.g. a
# cdf value
self.neg_threshold = neg_threshold
self.transformed_neg_threshold = transformed_neg_threshold
self.pos_threshold = pos_threshold
self.transformed_pos_threshold = transformed_pos_threshold
self.val_transformer = val_transformer
def save_hdf5(self, grp):
grp.attrs["neg_threshold"] = self.neg_threshold
grp.attrs["transformed_neg_threshold"] = self.transformed_neg_threshold
grp.attrs["pos_threshold"] = self.pos_threshold
grp.attrs["transformed_pos_threshold"] = self.transformed_pos_threshold
self.val_transformer.save_hdf5(grp.create_group("val_transformer"))
@classmethod
def from_hdf5(cls, grp):
neg_threshold = grp.attrs['neg_threshold']
transformed_neg_threshold = grp.attrs['transformed_neg_threshold']
pos_threshold = grp.attrs['pos_threshold']
transformed_pos_threshold = grp.attrs['transformed_pos_threshold']
val_transformer = AbstractValTransformer.from_hdf5(
grp["val_transformer"])
return cls(neg_threshold=neg_threshold,
transformed_neg_threshold=transformed_neg_threshold,
pos_threshold=pos_threshold,
transformed_pos_threshold=transformed_pos_threshold,
val_transformer=val_transformer)
class AbstractCoordProducer(object):
def __call__(self):
raise NotImplementedError()
@classmethod
def from_hdf5(cls, grp):
the_class = eval(grp.attrs["class"])
return the_class.from_hdf5(grp)
class SeqletCoordsFWAP(SeqletCoordinates):
"""
Coordinates for the FixedWindowAroundChunks CoordProducer
"""
def __init__(self, example_idx, start, end, score):
self.score = score
super(SeqletCoordsFWAP, self).__init__(
example_idx=example_idx,
start=start, end=end,
is_revcomp=False)
class CoordProducerResults(object):
def __init__(self, coords, tnt_results):
self.coords = coords
self.tnt_results = tnt_results
@classmethod
def from_hdf5(cls, grp):
coord_strings = util.load_string_list(dset_name="coords",
grp=grp)
coords = [SeqletCoordinates.from_string(x) for x in coord_strings]
tnt_results = TransformAndThresholdResults.from_hdf5(
grp["tnt_results"])
return CoordProducerResults(coords=coords,
tnt_results=tnt_results)
def save_hdf5(self, grp):
util.save_string_list(
string_list=[str(x) for x in self.coords],
dset_name="coords",
grp=grp)
self.tnt_results.save_hdf5(
grp=grp.create_group("tnt_results"))
def get_simple_window_sum_function(window_size):
def window_sum_function(arrs):
to_return = []
for arr in arrs:
cumsum = np.cumsum(arr)
cumsum = np.array([0]+list(cumsum))
to_return.append(cumsum[window_size:]-cumsum[:-window_size])
return to_return
return window_sum_function
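# Hedged check (not part of the original module): the cumulative-sum trick above should
# match a naive sliding-window sum; values below are illustrative.
# window_sum_demo = get_simple_window_sum_function(3)
# arr_demo = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
# print(window_sum_demo([arr_demo])[0])                                # [ 6.  9. 12.]
# print([arr_demo[i:i+3].sum() for i in range(len(arr_demo)-2)])       # same values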
class GenerateNullDist(object):
def __call__(self, score_track):
raise NotImplementedError()
class TakeSign(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.sign(x) for x in score_track]
return null_tracks
class TakeAbs(GenerateNullDist):
@classmethod
def from_hdf5(cls, grp):
raise NotImplementedError()
def save_hdf(cls, grp):
raise NotImplementedError()
def __call__(self, score_track):
null_tracks = [np.abs(x) for x in score_track]
return null_tracks
class LaplaceNullDist(GenerateNullDist):
def __init__(self, num_to_samp, verbose=True,
percentiles_to_use=[5*(x+1) for x in range(19)],
random_seed=1234):
self.num_to_samp = num_to_samp
self.verbose = verbose
self.percentiles_to_use = np.array(percentiles_to_use)
self.random_seed = random_seed
self.rng = np.random.RandomState()
@classmethod
def from_hdf5(cls, grp):
num_to_samp = grp.attrs["num_to_samp"]
verbose = grp.attrs["verbose"]
percentiles_to_use = np.array(grp["percentiles_to_use"][:])
return cls(num_to_samp=num_to_samp, verbose=verbose, percentiles_to_use=percentiles_to_use)
def save_hdf5(self, grp):
grp.attrs["class"] = type(self).__name__
grp.attrs["num_to_samp"] = self.num_to_samp
grp.attrs["verbose"] = self.verbose
grp.create_dataset('percentiles_to_use',
data=self.percentiles_to_use)
def __call__(self, score_track, windowsize, original_summed_score_track):
#original_summed_score_track is supplied to avoid recomputing it
window_sum_function = get_simple_window_sum_function(windowsize)
if (original_summed_score_track is None):
original_summed_score_track = window_sum_function(arrs=score_track)
values = np.concatenate(original_summed_score_track, axis=0)
# first estimate mu, using two level histogram to get to 1e-6
hist1, bin_edges1 = np.histogram(values, bins=1000)
peak1 = np.argmax(hist1)
l_edge = bin_edges1[peak1]
r_edge = bin_edges1[peak1+1]
top_values = values[ (l_edge < values) & (values < r_edge) ]
hist2, bin_edges2 = np.histogram(top_values, bins=1000)
peak2 = np.argmax(hist2)
l_edge = bin_edges2[peak2]
r_edge = bin_edges2[peak2+1]
mu = (l_edge + r_edge) / 2
if (self.verbose):
print("peak(mu)=", mu)
pos_values = [x for x in values if x >= mu]
neg_values = [x for x in values if x <= mu]
#for an exponential distribution:
# cdf = 1 - exp(-lambda*x)
# exp(-lambda*x) = 1-cdf
# -lambda*x = log(1-cdf)
# lambda = -log(1-cdf)/x
# x = -log(1-cdf)/lambda
#Take the most aggressive lambda over all percentiles
pos_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.percentile(a=pos_values, q=self.percentiles_to_use)-mu))
neg_laplace_lambda = np.max(
-np.log(1-(self.percentiles_to_use/100.0))/
(np.abs(np.percentile(a=neg_values,
q=100-self.percentiles_to_use)-mu)))
self.rng.seed(self.random_seed)
prob_pos = float(len(pos_values))/(len(pos_values)+len(neg_values))
sampled_vals = []
for i in range(self.num_to_samp):
sign = 1 if (self.rng.uniform() < prob_pos) else -1
if (sign == 1):
sampled_cdf = self.rng.uniform()
val = -np.log(1-sampled_cdf)/pos_laplace_lambda + mu  # x = -log(1-cdf)/lambda, shifted by mu (assumed completion)
from functools import partial
from warnings import warn
import numpy as np
from numpy.polynomial.legendre import leggauss
from scipy.special import erf, beta as beta_fn, gammaln
from scipy.linalg import solve_triangular
from numba import njit
from .sys_utilities import hash_array
def sub2ind(sizes, multi_index):
r"""
Map a d-dimensional index to the scalar index of the equivalent flat
1D array
Examples
--------
.. math::
\begin{bmatrix}
0,0 & 0,1 & 0,2\\
1,0 & 1,1 & 1,2\\
2,0 & 2,1 & 2,2
\end{bmatrix}
\rightarrow
\begin{bmatrix}
0 & 3 & 6\\
1 & 4 & 7\\
2 & 5 & 8
\end{bmatrix}
>>> from pyapprox.utilities import sub2ind
>>> sizes = [3,3]
>>> ind = sub2ind(sizes,[1,0])
>>> print(ind)
1
Parameters
----------
sizes : integer
The number of elems in each dimension. For a 2D index
sizes = [numRows, numCols]
multi_index : np.ndarray (len(sizes))
The d-dimensional index
Returns
-------
scalar_index : integer
The scalar index
See Also
--------
pyapprox.utilities.ind2sub
"""
num_sets = len(sizes)
scalar_index = 0
shift = 1
for ii in range(num_sets):
scalar_index += shift * multi_index[ii]
shift *= sizes[ii]
return scalar_index
def ind2sub(sizes, scalar_index, num_elems):
r"""
Map a scalar index of a flat 1D array to the equivalent d-dimensional index
Examples
--------
.. math::
\begin{bmatrix}
0 & 3 & 6\\
1 & 4 & 7\\
2 & 5 & 8
\end{bmatrix}
\rightarrow
\begin{bmatrix}
0,0 & 0,1 & 0,2\\
1,0 & 1,1 & 1,2\\
2,0 & 2,1 & 2,2
\end{bmatrix}
>>> from pyapprox.utilities import ind2sub
>>> sizes = [3,3]
>>> sub = ind2sub(sizes,1,9)
>>> print(sub)
[1 0]
Parameters
----------
sizes : integer
The number of elems in each dimension. For a 2D index
sizes = [numRows, numCols]
scalar_index : integer
The scalar index
num_elems : integer
The total number of elements in the d-dimensional matrix
Returns
-------
multi_index : np.ndarray (len(sizes))
The d-dimensional index
See Also
--------
pyapprox.utilities.sub2ind
"""
denom = num_elems
num_sets = len(sizes)
multi_index = np.empty((num_sets), dtype=int)
for ii in range(num_sets-1, -1, -1):
denom /= sizes[ii]
multi_index[ii] = scalar_index / denom
scalar_index = scalar_index % denom
return multi_index
def cartesian_product(input_sets, elem_size=1):
r"""
Compute the cartesian product of an arbitray number of sets.
The sets can consist of numbers or themselves be lists or vectors. All
the lists or vectors of a given set must have the same number of entries
(elem_size). However each set can have a different number of scalars,
lists, or vectors.
Parameters
----------
input_sets
The sets to be used in the cartesian product.
elem_size : integer
The size of the vectors within each set.
Returns
-------
result : np.ndarray (num_sets*elem_size, num_elems)
The cartesian product. num_elems = np.prod(sizes)/elem_size,
where sizes[ii] = len(input_sets[ii]), ii=0,..,num_sets-1.
result.dtype will be set to the first entry of the first input_set
"""
import itertools
out = []
# ::-1 reverse order to be backwards compatiable with old
# function below
for r in itertools.product(*input_sets[::-1]):
out.append(r)
out = np.asarray(out).T[::-1, :]
return out
try:
from pyapprox.cython.utilities import cartesian_product_pyx
# # fused type does not work for np.in32, np.float32, np.int64
# # so envoke cython cast
# if np.issubdtype(input_sets[0][0],np.signedinteger):
# return cartesian_product_pyx(input_sets,1,elem_size)
# if np.issubdtype(input_sets[0][0],np.floating):
# return cartesian_product_pyx(input_sets,1.,elem_size)
# else:
# return cartesian_product_pyx(
# input_sets,input_sets[0][0],elem_size)
# always convert to float then cast back
cast_input_sets = [np.asarray(s, dtype=float) for s in input_sets]
out = cartesian_product_pyx(cast_input_sets, 1., elem_size)
out = np.asarray(out, dtype=input_sets[0].dtype)
return out
except:
print('cartesian_product extension failed')
num_elems = 1
num_sets = len(input_sets)
sizes = np.empty((num_sets), dtype=int)
for ii in range(num_sets):
sizes[ii] = input_sets[ii].shape[0]/elem_size
num_elems *= sizes[ii]
# try:
# from pyapprox.weave import c_cartesian_product
# # note c_cartesian_product takes_num_elems as last arg and cython
# # takes elem_size
# return c_cartesian_product(input_sets, elem_size, sizes, num_elems)
# except:
# print ('cartesian_product extension failed')
result = np.empty(
(num_sets*elem_size, num_elems), dtype=type(input_sets[0][0]))
for ii in range(num_elems):
multi_index = ind2sub(sizes, ii, num_elems)
for jj in range(num_sets):
for kk in range(elem_size):
result[jj*elem_size+kk, ii] =\
input_sets[jj][multi_index[jj]*elem_size+kk]
return result
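# Hedged usage sketch (illustrative only): the cartesian product of {0, 1} and {2, 3, 4}
# has one column per combination, giving a (2, 6) array.
# sets_demo = [np.array([0, 1]), np.array([2, 3, 4])]
# print(cartesian_product(sets_demo).shape)  # expected: (2, 6)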
def outer_product(input_sets):
r"""
Construct the outer product of an arbitrary number of sets.
Examples
--------
.. math::
\{1,2\}\times\{3,4\}=\{1\times3, 2\times3, 1\times4, 2\times4\} =
\{3, 6, 4, 8\}
Parameters
----------
input_sets
The sets to be used in the outer product
Returns
-------
result : np.ndarray(np.prod(sizes))
The outer product of the sets.
result.dtype will be set to the dtype of the first entry of the first input_set
"""
out = cartesian_product(input_sets)
return np.prod(out, axis=0)
try:
from pyapprox.cython.utilities import outer_product_pyx
# fused type does not work for np.in32, np.float32, np.int64
# so envoke cython cast
if np.issubdtype(input_sets[0][0], np.signedinteger):
return outer_product_pyx(input_sets, 1)
if np.issubdtype(input_sets[0][0], np.floating):
return outer_product_pyx(input_sets, 1.)
else:
return outer_product_pyx(input_sets, input_sets[0][0])
except ImportError:
print('outer_product extension failed')
num_elems = 1
num_sets = len(input_sets)
sizes = np.empty((num_sets), dtype=int)
for ii in range(num_sets):
sizes[ii] = len(input_sets[ii])
num_elems *= sizes[ii]
# try:
# from pyapprox.weave import c_outer_product
# return c_outer_product(input_sets)
# except:
# print ('outer_product extension failed')
result = np.empty((num_elems), dtype=type(input_sets[0][0]))
for ii in range(num_elems):
result[ii] = 1.0
multi_index = ind2sub(sizes, ii, num_elems)
for jj in range(num_sets):
result[ii] *= input_sets[jj][multi_index[jj]]
return result
def unique_matrix_rows(matrix):
unique_rows = []
unique_rows_set = set()
for ii in range(matrix.shape[0]):
key = hash_array(matrix[ii, :])
if key not in unique_rows_set:
unique_rows_set.add(key)
unique_rows.append(matrix[ii, :])
return np.asarray(unique_rows)
def remove_common_rows(matrices):
num_cols = matrices[0].shape[1]
unique_rows_dict = dict()
for ii in range(len(matrices)):
matrix = matrices[ii]
assert matrix.shape[1] == num_cols
for jj in range(matrix.shape[0]):
key = hash_array(matrix[jj, :])
if key not in unique_rows_dict:
unique_rows_dict[key] = (ii, jj)
elif unique_rows_dict[key][0] != ii:
del unique_rows_dict[key]
# else:
# entry is a duplicate entry in the current. Allow this to
# occur but only add one of the duplicates to the unique rows dict
unique_rows = []
for key in list(unique_rows_dict.keys()):
ii, jj = unique_rows_dict[key]
unique_rows.append(matrices[ii][jj, :])
return np.asarray(unique_rows)
def allclose_unsorted_matrix_rows(matrix1, matrix2):
if matrix1.shape != matrix2.shape:
return False
matrix1_dict = dict()
for ii in range(matrix1.shape[0]):
key = hash_array(matrix1[ii, :])
# allow duplicates of rows
if key not in matrix1_dict:
matrix1_dict[key] = 0
else:
matrix1_dict[key] += 1
matrix2_dict = dict()
for ii in range(matrix2.shape[0]):
key = hash_array(matrix2[ii, :])
# allow duplicates of rows
if key not in matrix2_dict:
matrix2_dict[key] = 0
else:
matrix2_dict[key] += 1
if len(list(matrix1_dict.keys())) != len(list(matrix2_dict.keys())):
return False
for key in list(matrix1_dict.keys()):
if key not in matrix2_dict:
return False
if matrix2_dict[key] != matrix1_dict[key]:
return False
return True
def get_2d_cartesian_grid(num_pts_1d, ranges):
r"""
Get a 2d tensor grid with equidistant points.
Parameters
----------
num_pts_1d : integer
The number of points in each dimension
ranges : np.ndarray (4)
The lower and upper bound of each dimension [lb_1,ub_1,lb_2,ub_2]
Returns
-------
grid : np.ndarray (2,num_pts_1d**2)
The points in the tensor product grid.
[x1,x2,...x1,x2...]
[y1,y1,...y2,y2...]
"""
# from math_tools_cpp import cartesian_product_double as cartesian_product
from PyDakota.math_tools import cartesian_product
x1 = np.linspace(ranges[0], ranges[1], num_pts_1d)
x2 = np.linspace(ranges[2], ranges[3], num_pts_1d)
abscissa_1d = []
abscissa_1d.append(x1)
abscissa_1d.append(x2)
grid = cartesian_product(abscissa_1d, 1)
return grid
def invert_permutation_vector(p, dtype=int):
r"""
Returns the "inverse" of a permutation vector. I.e., returns the
permutation vector that performs the inverse of the original
permutation operation.
Parameters
----------
p: np.ndarray
Permutation vector
dtype: type
Data type passed to np.ndarray constructor
Returns
-------
pt: np.ndarray
Permutation vector that accomplishes the inverse of the
permutation p.
"""
N = np.max(p)
# standard inverse-permutation construction (assumed completion)
pt = np.zeros(N+1, dtype=dtype)
pt[p] = np.arange(N+1, dtype=dtype)
return pt
import pytest
import numpy as np
from rio_color.utils import to_math_type
from rio_color.operations import (
sigmoidal,
gamma,
saturation,
simple_atmo,
parse_operations,
simple_atmo_opstring,
)
@pytest.fixture
def arr():
return to_math_type(
np.array(
[
# red
[[1, 2], [3, 4]],
# green
[[5, 6], [7, 8]],
# blue
[[9, 10], [11, 12]],
]
).astype("uint8")
* 10
)
@pytest.fixture
def arr_rgba():
return to_math_type(
np.array(
[
[[1, 2], [3, 4]], # red
[[5, 6], [7, 8]], # green
[[9, 10], [11, 12]], # blue
[[0, 0], [25.5, 25.5]], # alpha
]
).astype("uint8")
* 10
)
def test_sigmoidal(arr):
x = sigmoidal(arr, 10, 0.15)
assert abs(x[0][0][0] - 0.08056034) < 1e-4
# contrast < 0
x = sigmoidal(arr, -10, 0.15)
assert abs(x[0][0][0] - 0.020186627) < 1e-4
# bias zero, make it a tiny epsilon
x = sigmoidal(arr, 10, 0)
assert abs(x[0][0][0] - 0.19362122) < 1e-4
# contrast zero, arrays are equal
x = sigmoidal(arr, 0, 0.15)
assert np.array_equal(x, arr)
# output contains NaN
with pytest.raises(ValueError):
x = sigmoidal(arr, 100, -0.5)
# output is not within the range of 0..1
with pytest.raises(ValueError):
arr[0][0][0] = 1.0
arr[0][0][1] = 2.0
x = sigmoidal(arr, 10, -0.5)
def test_gamma(arr):
x = gamma(arr, 0.95)
assert abs(x[0][0][0] - 0.033069782) < 1e-4
# output is not within the range of 0..1
with pytest.raises(ValueError):
x = gamma(arr, -2.0)
# test output contains inf and is out of range 0..1
with pytest.raises(ValueError):
x = gamma(arr, -0.001)
# test output contains NaN
with pytest.raises(ValueError):
x = gamma(arr, np.nan)
with pytest.raises(ValueError):
x = gamma(arr * -1, 2.2)
def test_sat(arr):
x = saturation(arr, 50)
assert abs(x[0][0][0] - 0.15860622) < 1e-4
def test_sat_rgba_direct(arr_rgba):
# Anything but 3-band RGB will fail
with pytest.raises(ValueError):
saturation(arr_rgba, 50)
with pytest.raises(ValueError):
saturation(arr_rgba[0:2], 50)
def test_atmo(arr):
x = simple_atmo(arr, 0.03, 10, 0.15)
assert abs(x[0][0][0] - 0.080560341) < 1e-4
# Gamma output is not within the range 0..1
with pytest.raises(ValueError):
x = simple_atmo(arr, 2.0, 10, 0.15)
# Sigmoidal contrast output contains NaN
with pytest.raises(ValueError):
x = simple_atmo(arr, 0.03, 1000, -0.15)
def test_parse_gamma(arr):
f = parse_operations("gamma rgb 0.95")[0]
assert np.array_equal(f(arr), gamma(arr, 0.95))
def test_parse_sigmoidal(arr):
f = parse_operations("sigmoidal rgb 5 0.53")[0]
assert np.array_equal(f(arr), sigmoidal(arr, contrast=5, bias=0.53))
def test_parse_multi(arr):
f1, f2 = parse_operations("gamma rgb 0.95 sigmoidal rgb 35 0.13")
assert np.array_equal(
f2(f1(arr)), sigmoidal(gamma(arr, g=0.95), contrast=35, bias=0.13)
)
def test_parse_comma(arr):
# Commas are optional whitespace, treated like empty string
f1, f2 = parse_operations("gamma r,g,b 0.95, sigmoidal r,g,b 35 0.13")
assert np.array_equal(
f2(f1(arr)), sigmoidal(gamma(arr, g=0.95), contrast=35, bias=0.13)
)
def test_parse_saturation_rgb(arr):
f = parse_operations("saturation 1.25")[0]
assert np.allclose(f(arr), saturation(arr, 1.25))
def test_parse_rgba(arr, arr_rgba):
f = parse_operations("gamma rg 0.95")[0]
rgb = f(arr)
assert rgb.shape[0] == 3
rgba = f(arr_rgba)
assert rgba.shape[0] == 4
# rgb bands are same
assert np.allclose(rgba[0:3], rgb[0:3])
# alpha unaltered
assert np.array_equal(rgba[3], arr_rgba[3])
import sys
import time
import yaml
import math
import signal
import datetime
import threading
import traceback
import numpy as np
from cvxopt import matrix, solvers
#from scipy.spatial import ConvexHull
import matplotlib.patches as ptc
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# from actions import *
COLORS = [(0.0, 0.0, 0.0), (0.99, 0.0, 0.0), (0.0, 0.99, 0.0), (0.0, 0.0, 0.99), (0.99, 0.99, 0.0), (0.99, 0.0, 0.99), (0.0, 0.99, 0.99)]
global_boundary = []
xlim = []
ylim = []
test_type = 0
world = None
def is_in_space(p, tol):
global xlim, ylim
return xlim[0] - tol <= p[0] <= xlim[1] + tol and ylim[0] - tol <= p[1] <= ylim[1] + tol
def is_in_bounding_polygon(p, tol):
global global_boundary
pass
def angle_in_2pi(v):
angle = np.arctan2(v[1], v[0])
# map from (-pi, pi] to [0, 2*pi), as the function name suggests (assumed completion)
return angle if angle >= 0.0 else angle + 2.0 * np.pi
# Dash dependencies import
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_uploader as du
import uuid
import pathlib
import dash_bootstrap_components as dbc
import plotly.figure_factory as ff
from dash.dependencies import Input, Output,State
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
px.defaults.template = "ggplot2"
# End Dash dependencies import
# Data preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
# ML Algorithm
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
# Model evaluation
from sklearn.metrics import accuracy_score, precision_score, recall_score,f1_score,confusion_matrix,roc_curve,roc_auc_score
# Save model
import os
import io
import shutil
import joblib
from app import app, server
PATH=pathlib.Path(__file__).parent
DATA_PATH=PATH.joinpath("../datasets").resolve()
TELCO_CHURN_FILE_UPLOADS_DATA_PATH=PATH.joinpath("../datasets/telco_churn_file_uploads").resolve()
du.configure_upload(app, TELCO_CHURN_FILE_UPLOADS_DATA_PATH, use_upload_id=False)
TELCO_CHURN_MODEL_DATA_PATH=PATH.joinpath("../Notebooks/Churn Models").resolve()
feat_importance_df=pd.read_csv(DATA_PATH.joinpath("feature-importance.csv"))
df=pd.read_csv(DATA_PATH.joinpath("telco-customer-churn.csv"))
telco_churm_metrics_df=pd.read_json(TELCO_CHURN_MODEL_DATA_PATH.joinpath("model_metrics.json"), orient ='split', compression = 'infer')
joblib_model = joblib.load(TELCO_CHURN_MODEL_DATA_PATH.joinpath("best_gridsearch_model_pipeline.pkl"))
df['TotalCharges']=pd.to_numeric(df['TotalCharges'], errors='coerce')
# Revenue distribution
def distribution_by_revenue(df):
totalcharges_attrition_df=df.groupby( ["Churn"], as_index=False )["TotalCharges"].sum()
totalcharges_attrition_df=totalcharges_attrition_df.sort_values(by=['TotalCharges'],ascending=True)
totalcharges_attrition_df.columns=['Churn','Revenue']
colors = ['crimson','skyblue']
totalcharges_attrition_df=totalcharges_attrition_df.round(0)
fig=px.bar(totalcharges_attrition_df,x='Churn',y='Revenue',color='Churn',text='Revenue',color_discrete_sequence=colors,
title='Churn by Revenue')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
# churn distribution
def churn_distribution(df):
attrition_df=df.groupby(["Churn"], as_index=False )["customerID"].count()
colors = ['skyblue','crimson']
fig = go.Figure(data=[go.Pie(labels=attrition_df['Churn'].tolist(), values=attrition_df['customerID'].tolist(), hole=.3)])
fig.update_layout(title={'text': 'Customer Churn Distribution','y':0.9,'x':0.5, 'xanchor': 'center','yanchor': 'top'},
showlegend=False,autosize=True,annotations=[dict(text='Attrition', font_size=20, showarrow=False)],margin=dict(t=100,b=0,l=0,r=0),height=350,colorway=colors)
return fig
# gender_attrition_df
def churn_by_gender(df):
gender_attrition_df=df.groupby(["Churn","gender"], as_index=False )["customerID"].count()
gender_attrition_df.columns=['Churn','Gender','Customers']
colors = ['skyblue','crimson']
fig=px.bar(gender_attrition_df,x='Gender',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,
title='Churn by Gender')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.46),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def churn_by_contract(df):
contract_attrition_df=df.groupby(["Churn","Contract"], as_index=False )["customerID"].count()
contract_base_df=df.groupby(["Contract"], as_index=False )["customerID"].count()
contract_base_df['Churn']='Customer Base'
contract_attrition_df=contract_attrition_df.append(contract_base_df, ignore_index = True)
contract_attrition_df.columns=['Churn','Contract','Customers']
contract_attrition_df=contract_attrition_df.sort_values(by=['Contract', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(contract_attrition_df,x='Contract',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Customer Contract Type')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def churn_by_monthlycharges(df):
churn_dist = df[df['Churn']=='Yes']['MonthlyCharges']
no_churn_dist = df[df['Churn']=='No']['MonthlyCharges']
group_labels = ['No Churn', 'Churn Customers']
colors = ['teal','crimson']
fig = ff.create_distplot([no_churn_dist,churn_dist], group_labels, bin_size=[1, .10],
curve_type='kde', show_rug=False, colors=colors)# override default 'kde' or 'normal'
fig.update_layout(title={'text': 'Customer Churn Distribution by Monthly Charges','y':0.9,'x':0.5, 'xanchor': 'center','yanchor': 'top'},
legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=50,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def tenure_charges_correlation(df):
df_correlation=df[['tenure','MonthlyCharges','TotalCharges']].corr()
fig=px.imshow(df_correlation,title='Tenure, Monthly and Total Charges Correlation')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def churn_by_citizenship(df):
citizenship_attrition_df=df.groupby( [ "Churn","SeniorCitizen"], as_index=False )["customerID"].count()
citizenship_base_df=df.groupby(["SeniorCitizen"], as_index=False )["customerID"].count()
citizenship_base_df['Churn']='Customer Base'
citizenship_attrition_df=citizenship_attrition_df.append(citizenship_base_df, ignore_index = True)
citizenship_attrition_df.columns=['Churn','Citizenship','Customers']
citizenship_attrition_df=citizenship_attrition_df.sort_values(by=['Citizenship', 'Customers'],ascending=False)
colors = ['teal','skyblue','crimson']
fig=px.bar(citizenship_attrition_df,x='Customers',y=['Citizenship'],color='Churn',text='Customers',orientation="h",color_discrete_sequence=colors,barmode="group",
title='Churn by Citizenship')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def churn_by_tenure(df):
tenure_attrition_df=df.groupby( [ "Churn","tenure"], as_index=False )["customerID"].count()
tenure_attrition_df.columns=['Churn','Tenure','Customers']
colors = ['skyblue','crimson']
tenure_attrition_df=tenure_attrition_df.round(0)
fig = px.treemap(tenure_attrition_df, path=['Churn', 'Tenure'], values='Customers',color_discrete_sequence=colors,
title='Churn by Customer Tenure')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def data_summary(df):
data_summary_df=pd.DataFrame(df.describe())
data_summary_df.reset_index(level=0, inplace=True)
data_summary_df=data_summary_df.drop(columns='SeniorCitizen')
data_summary_df.columns=['Metric','Tenure','MonthlyCharges','TotalCharges']
fig = go.Figure(data=[go.Table(header=dict(values=list(data_summary_df.columns),fill_color='paleturquoise',
align='left'),cells=dict(values=[data_summary_df.Metric, data_summary_df.Tenure, data_summary_df.MonthlyCharges, data_summary_df.TotalCharges],
fill_color='lavender',align='left'))])
fig.update_layout(showlegend=False,autosize=True,margin=dict(t=0,b=0,l=0,r=0),height=350)
return fig
def churn_by_payment_method(df):
PaymentMethod_attrition_df=df.groupby( [ "Churn","PaymentMethod"], as_index=False )["customerID"].count()
PaymentMethod_base_df=df.groupby(["PaymentMethod"], as_index=False )["customerID"].count()
PaymentMethod_base_df['Churn']='Customer Base'
PaymentMethod_attrition_df=PaymentMethod_attrition_df.append(PaymentMethod_base_df, ignore_index = True)
PaymentMethod_attrition_df.columns=['Churn','PaymentMethod','Customers']
PaymentMethod_attrition_df=PaymentMethod_attrition_df.sort_values(by=['PaymentMethod', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(PaymentMethod_attrition_df,x='PaymentMethod',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Payment Method')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.40),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def churn_by_techsupport(df):
techsupport_attrition_df=df.groupby( [ "Churn","TechSupport"], as_index=False )["customerID"].count()
techsupport_base_df=df.groupby(["TechSupport"], as_index=False )["customerID"].count()
techsupport_base_df['Churn']='Customer Base'
techsupport_attrition_df=techsupport_attrition_df.append(techsupport_base_df, ignore_index = True)
techsupport_attrition_df.columns=['Churn','TechSupport','Customers']
techsupport_attrition_df=techsupport_attrition_df.sort_values(by=['TechSupport', 'Customers'],ascending=True)
colors = ['crimson','skyblue','teal']
fig=px.bar(techsupport_attrition_df,x='TechSupport',y='Customers',color='Churn',text='Customers',color_discrete_sequence=colors,barmode="group",
title='Churn by Tech Support')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
####3 MODELING ####
def feature_correlation(df):
df['TotalCharges']=df['TotalCharges'].fillna(df['TotalCharges'].mean()) # Impute TotalCharges null values with mean TotalCharges
df['Churn'].replace(to_replace='Yes', value=1, inplace=True)
df['Churn'].replace(to_replace='No', value=0, inplace=True)
df['SeniorCitizen'] = df['SeniorCitizen'].astype(str) # convert SeniorCitizen column to string
data_columns=['gender','Partner','Dependents','PhoneService','MultipleLines','InternetService','OnlineSecurity','OnlineBackup','DeviceProtection', 'TechSupport', 'StreamingTV','StreamingMovies','Contract', 'PaperlessBilling', 'PaymentMethod','SeniorCitizen']
df=pd.get_dummies(df,columns=data_columns)
churn_corr_df=pd.DataFrame(df.corr()['Churn'])
churn_corr_df.reset_index(level=0, inplace=True)
churn_corr_df.columns=['Features','Correlation']
churn_corr_df["Correlation Type"] = np.where(churn_corr_df["Correlation"]<0, 'negative', 'positive')
churn_corr_df=churn_corr_df.sort_values(by=['Correlation'],ascending=False)
churn_corr_df=churn_corr_df[~churn_corr_df['Features'].isin(['Churn'])]
colors = ['skyblue','orange']
fig=px.bar(churn_corr_df,x='Features',y='Correlation',color='Correlation Type',text='Correlation',color_discrete_sequence=colors,
title='Features Correlation')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.50),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def feature_importance(feat_importance_df):
feat_importance_df=feat_importance_df.sort_values(by=['Importance'],ascending=False)
fig=px.bar(feat_importance_df,x='Features',y='Importance',text='Importance',color='Importance',height=650,title='Random Forest Feature Importance')
fig.update_layout(legend=dict(yanchor="top",y=0.99,xanchor="left",x=0.01),autosize=True,margin=dict(t=30,b=0,l=0,r=0))
return fig
def telco_churn_model_metrics_summary(telco_churm_metrics_df):
unpivoted_metric_df=telco_churm_metrics_df[telco_churm_metrics_df['Type']=='Metric'][['Model','Accuracy','Precision','Recall','F_1_Score','AUC_Score']]
unpivoted_metric_df=unpivoted_metric_df.melt(id_vars=['Model'], var_name='Metrics', value_name='Score').sort_values(by=['Score'],ascending=True)
colors = ['crimson','skyblue','teal','orange']
fig=px.bar(unpivoted_metric_df,x='Metrics',y='Score',color='Model',text='Score',color_discrete_sequence=colors,barmode="group",title='Model Performance Metrics')
fig.update_layout(legend=dict(yanchor="top",y=0.95,xanchor="left",x=0.01),autosize=True,margin=dict(t=30,b=0,l=0,r=0)) #use barmode='stack' when stacking,
return fig
def uac_roc(telco_churm_metrics_df):
uac_roc_df=telco_churm_metrics_df[telco_churm_metrics_df['Type']=='ROC'][['Model','Confusion_Matrix_ROC']]
uac_roc_df=uac_roc_df.sort_values(by=['Model'],ascending=True)
uac_roc_df=uac_roc_df.set_index('Model').transpose()
uac_roc_fig = go.Figure()
uac_roc_fig.add_trace(go.Scatter(x=uac_roc_df['Logistic Regression FPR'][0], y=uac_roc_df['Logistic Regression TPR'][0],name='Logistic Regression',
line = dict(color='teal', width=2),line_shape='spline'))
uac_roc_fig.add_trace(go.Scatter(x=uac_roc_df['Random Forest FPR'][0], y=uac_roc_df['Random Forest TPR'][0],name='Random Forest',
line = dict(color='royalblue', width=2),line_shape='spline'))
uac_roc_fig.add_trace(go.Scatter(x=uac_roc_df['Support Vector Machine FPR'][0], y=uac_roc_df['Support Vector Machine TPR'][0],name='Support Vector Machine',
line = dict(color='orange', width=2),line_shape='spline'))
uac_roc_fig.add_trace(go.Scatter(x= | np.array([0., 1.]) | numpy.array |
"""
Project: RadarBook
File: back_projection_backhoe_example.py
Created by: <NAME>
On: 2/20/2019
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
import sys
from Chapter10.ui.BackProjectionBH_ui import Ui_MainWindow
from numpy import linspace, meshgrid, array, radians, amax, ones, squeeze, max, min
from scipy.signal.windows import hanning, hamming
from Libs.sar import backprojection
from scipy.io import loadmat
from mpl_toolkits.mplot3d import Axes3D
from PyQt5.QtWidgets import QApplication, QMainWindow
from matplotlib.backends.qt_compat import QtCore
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class BackProjection(QMainWindow, Ui_MainWindow):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
# Connect to the input boxes, when the user presses enter the form updates
self.x_span.returnPressed.connect(self._update_canvas)
self.y_span.returnPressed.connect(self._update_canvas)
self.z_span.returnPressed.connect(self._update_canvas)
self.nx_ny_nz.returnPressed.connect(self._update_canvas)
self.az_start_end.returnPressed.connect(self._update_canvas)
self.el_start_end.returnPressed.connect(self._update_canvas)
self.dynamic_range.returnPressed.connect(self._update_image_only)
self.polarization.currentIndexChanged.connect(self._update_canvas)
self.window_type.currentIndexChanged.connect(self._update_canvas)
# Set up a figure for the plotting canvas
fig = Figure()
self.fig = fig
self.axes1 = fig.add_subplot(111, projection='3d', facecolor='white')
self.my_canvas = FigureCanvas(fig)
self.axes1.mouse_init()
# Add the canvas to the vertical layout
self.verticalLayout.addWidget(self.my_canvas)
self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self))
# Update the canvas for the first display
self._update_canvas()
def _update_canvas(self):
"""
Update the figure when the user changes and input value.
:return:
"""
# Get the parameters from the form
x_span = float(self.x_span.text())
y_span = float(self.y_span.text())
z_span = float(self.z_span.text())
nx_ny_nz = self.nx_ny_nz.text().split(',')
self.nx = int(nx_ny_nz[0])
self.ny = int(nx_ny_nz[1])
self.nz = int(nx_ny_nz[2])
az_start_end = self.az_start_end.text().split(',')
az_start = int(az_start_end[0])
az_end = int(az_start_end[1])
el_start_end = self.el_start_end.text().split(',')
el_start = int(el_start_end[0])
el_end = int(el_start_end[1])
# Get the selected window from the form
window_type = self.window_type.currentText()
# Get the polarization from the form
polarization = self.polarization.currentText()
x = linspace(-0.5 * x_span, 0.5 * x_span, self.nx)
y = linspace(-0.5 * y_span, 0.5 * y_span, self.ny)
z = linspace(-0.5 * z_span, 0.5 * z_span, self.nz)
self.x_image, self.y_image, self.z_image = meshgrid(x, y, z, indexing='ij')
fft_length = 8192
# el 18 - 43 (-1)
# az 66 - 115 (-1)
# Initialize the image
self.bp = 0
# Loop over the azimuth and elevation angles
for el in range(el_start, el_end + 1):
for az in range(az_start, az_end + 1):
print('El {0:d} Az {1:d}'.format(el, az))
filename = '../../Backhoe_CP/3D_Challenge_Problem/3D_K_Space_Data/backhoe_el{0:03d}_az{1:03d}.mat'.format(el, az)
b = loadmat(filename)
# build a list of keys and values for each entry in the structure
vals = b['data'][0, 0] # <-- set the array you want to access.
keys = b['data'][0, 0].dtype.descr
# Assemble the keys and values into variables with the same name as that used in MATLAB
for i in range(len(keys)):
key = keys[i][0]
val = | squeeze(vals[key]) | numpy.squeeze |
from lenet5 import *
import numpy as np
import matplotlib.pyplot as plt
def get_data(dim=100, load=True):
"""
:param dim: images per worker
:param load:
:return: data_workers, y_workers, lenet5, right_pred_data, right_pred_labels, test_x, test_y.
Where right_pred_data is the test data that lenet5 classified correctly.
"""
# ------------------- DATA preparation -------------------
_, (test_x, test_y) = load_MNIST()
path = '../data/lenet5'
lenet5 = LeNet5(path=path, load=load)
lab = lenet5.predict(test_x)
indexes = lab == test_y
right_pred_data = test_x[indexes]
right_pred_labels = test_y[indexes]
labels_number = 10
data_per_classes = []
for label_class in range(0, labels_number):
data_per_classes.append(right_pred_data[right_pred_labels == label_class][:dim])
data_per_classes = np.array(data_per_classes)
data_workers = []
step = dim//labels_number
for offset in range(0, dim, step):
image_worker = []
for c in range(0, labels_number):
image_worker.extend(data_per_classes[c, offset:offset+step, :, :, :])
data_workers.append(image_worker)
data_workers = np.array(data_workers)
y_workers = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
y_workers = np.repeat(y_workers, step)
return data_workers, y_workers, lenet5, right_pred_data, right_pred_labels, test_x, test_y
def get_image_perturbation(perturbation, title, axis):
"""
:param perturbation: a numpy perturbation to plot
:param title: title to give to the generated image
:return: the axis and the image
"""
img = axis.imshow(perturbation.reshape((28, 28)))
axis.set_title(title)
plt.colorbar(img, ax=axis, fraction=0.03, pad=0.05)
return axis, img
def plot_perturbation(perturbation, title, file_path=None, figsize=(5,5)):
fig, ax = plt.subplots(figsize=figsize)
ax, img = get_image_perturbation(perturbation, title, ax)
if file_path is not None:
plt.savefig(file_path, bbox_inches="tight")
plt.show()
def get_image_perturbed(perturbation, image_test, title, axis):
image = image_test.reshape(28, 28)
img_noise = image + perturbation.reshape((28, 28))
img_noise = np.clip(img_noise, 0., 1.)
img = axis.imshow(img_noise, cmap='Greys')
axis.set_title(title)
plt.colorbar(img, ax=axis, fraction=0.03, pad=0.05)
return axis, img
def plot_perturbed_img(perturbation, image_test, file_path=None, figsize=(5,5)):
"""
:param perturbation: a numpy perturbation
:param image_test: a numpy image
:param file_path:
:return:
"""
image = image_test.reshape(28, 28)
img_noise = image + perturbation.reshape((28, 28))
img_noise = np.clip(img_noise, 0., 1.)
fig, ax = plt.subplots(1, 2, figsize=figsize)
a = ax[0].imshow(image, cmap='Greys')
b = ax[1].imshow(img_noise, cmap='Greys')
ax[0].set_title("Real image")
ax[1].set_title("Perturbed image")
fig.colorbar(a, ax=ax[0], fraction=0.03, pad=0.05)
fig.colorbar(b, ax=ax[1], fraction=0.03, pad=0.05)
fig.tight_layout(pad=0.3)
if file_path is not None:
plt.savefig(file_path, bbox_inches="tight")
plt.show()
def predict_single_img_perturbation(lenet5, image, delta):
"""
:param image: Single image to predict
:param delta: perturbation
:return: predicted class
"""
image_noise = image + delta.reshape(28, 28, 1)
image_noise = | np.clip(image_noise, 0., 1.) | numpy.clip |
import numpy as np
from scipy.special import gamma
import bisect
def vTmv(vec, mat=None, vec2=None):
"""Multiply a vector transpose times a matrix times a vector.
@param vec The first vector (will be transposed).
@param mat The matrix in the middle. Identity by default.
@param vec2 The second vector (will not be transposed.) By default, the same as the vec.
@returns Product. Could be a scalar or a matrix depending on whether vec is a row or column
vector.
"""
if len(vec.shape) == 1:
vec = np.reshape(vec, [vec.shape[0], 1])
if mat is None:
mat = np.eye(len(vec))
if vec2 is None:
vec2 = vec
return np.dot(vec.T, np.dot(mat, vec2))
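# Example usage (illustrative values): for a column vector v and matrix M,
# vTmv(v, M) evaluates the quadratic form v^T M v.
#   v = np.array([1.0, 2.0]); M = np.eye(2)
#   vTmv(v, M)   # -> array([[5.]])
#   vTmv(v)      # identity matrix by default -> array([[5.]])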
def gammad(d, nu_over_2):
"""D-dimensional gamma function."""
nu = 2.0 * nu_over_2
return np.pi**(d*(d-1.)/4)*np.multiply.reduce([gamma(0.5*(nu+1-i)) for i in range(d)])
def random_wish(dof, S, size=None):
dim = S.shape[0]
if size is None:
x = np.random.multivariate_normal(np.zeros(dim), S, size=dof)
return | np.dot(x.T, x) | numpy.dot |
# -*- coding: utf-8 -*-
"""
dicom2nifti
@author: abrys
"""
import dicom2nifti.compressed_dicom as compressed_dicom
import os
import struct
from pydicom.tag import Tag
import logging
import numpy
from dicom2nifti.exceptions import ConversionValidationError, ConversionError
import dicom2nifti.settings
logger = logging.getLogger(__name__)
# Disable false positive numpy errors
# pylint: disable=E1101
def read_dicom_directory(dicom_directory, stop_before_pixels=False):
"""
Read all dicom files in a given directory (stop before pixels)
:type stop_before_pixels: bool
:type dicom_directory: str
:param stop_before_pixels: Should we stop reading before the pixeldata (handy if we only want header info)
:param dicom_directory: Directory with dicom data
:return: List of dicom objects
"""
dicom_input = []
for root, _, files in os.walk(dicom_directory):
for dicom_file in files:
file_path = os.path.join(root, dicom_file)
if compressed_dicom.is_dicom_file(file_path):
dicom_headers = compressed_dicom.read_file(file_path,
defer_size="1 KB",
stop_before_pixels=stop_before_pixels,
force=dicom2nifti.settings.pydicom_read_force)
if is_valid_imaging_dicom(dicom_headers):
dicom_input.append(dicom_headers)
return dicom_input
def is_hitachi(dicom_input):
"""
Use this function to detect if a dicom series is a hitachi dataset
:param dicom_input: directory with dicom files for 1 scan of a dicom_header
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is hitachi
if 'HITACHI' not in header.Manufacturer.upper():
return False
return True
def is_ge(dicom_input):
"""
Use this function to detect if a dicom series is a GE dataset
:param dicom_input: list with dicom objects
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is GE
if 'GE MEDICAL SYSTEMS' not in header.Manufacturer.upper():
return False
return True
def is_philips(dicom_input):
"""
Use this function to detect if a dicom series is a philips dataset
:param dicom_input: directory with dicom files for 1 scan of a dicom_header
"""
# read dicom header
header = dicom_input[0]
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
# check if manufacturer is Philips
if 'PHILIPS' not in header.Manufacturer.upper():
return False
return True
def is_siemens(dicom_input):
"""
Use this function to detect if a dicom series is a siemens dataset
:param dicom_input: directory with dicom files for 1 scan
"""
# read dicom header
header = dicom_input[0]
# check if manufacturer is Siemens
if 'Manufacturer' not in header or 'Modality' not in header:
return False # we try generic conversion in these cases
# check if Modality is mr
if header.Modality.upper() != 'MR':
return False
if 'SIEMENS' not in header.Manufacturer.upper():
return False
return True
def is_multiframe_dicom(dicom_input):
"""
Use this function to detect if a dicom series is a siemens 4D dataset
NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
(containing one series)
:param dicom_input: directory with dicom files for 1 scan
"""
# read dicom header
header = dicom_input[0]
if Tag(0x0002, 0x0002) not in header.file_meta:
return False
if header.file_meta[0x0002, 0x0002].value == '1.2.840.10008.5.1.4.1.1.4.1':
return True
return False
def is_valid_imaging_dicom(dicom_header):
"""
Function will do some basic checks to see if this is a valid imaging dicom
"""
# if it is philips and multiframe dicom then we assume it is ok
try:
if is_philips([dicom_header]):
if is_multiframe_dicom([dicom_header]):
return True
if "SeriesInstanceUID" not in dicom_header:
return False
if "InstanceNumber" not in dicom_header:
return False
if "ImageOrientationPatient" not in dicom_header or len(dicom_header.ImageOrientationPatient) < 6:
return False
if "ImagePositionPatient" not in dicom_header or len(dicom_header.ImagePositionPatient) < 3:
return False
# for all others if there is image position patient we assume it is ok
if Tag(0x0020, 0x0037) not in dicom_header:
return False
return True
except (KeyError, AttributeError):
return False
def get_volume_pixeldata(sorted_slices):
"""
the slope and intercept calculation can cause the slices to have different dtypes
we should get the correct dtype that can cover all of them
:type sorted_slices: list of slices
:param sorted_slices: slices sorted in the correct order to create the volume
"""
slices = []
combined_dtype = None
for slice_ in sorted_slices:
slice_data = _get_slice_pixeldata(slice_)
slice_data = slice_data[numpy.newaxis, :, :]
slices.append(slice_data)
if combined_dtype is None:
combined_dtype = slice_data.dtype
else:
combined_dtype = numpy.promote_types(combined_dtype, slice_data.dtype)
# create the new volume with with the correct data
vol = numpy.concatenate(slices, axis=0)
# Done
# if rgb data do separate transpose
if len(vol.shape) == 4 and vol.shape[3] == 3:
vol = numpy.transpose(vol, (2, 1, 0, 3))
else:
vol = numpy.transpose(vol, (2, 1, 0))
return vol
def _get_slice_pixeldata(dicom_slice):
"""
the slope and intercept calculation can cause the slices to have different dtypes
we should get the correct dtype that can cover all of them
:type dicom_slice: pydicom object
:param dicom_slice: slice to get the pixeldata for
"""
data = dicom_slice.pixel_array
# fix overflow issues for signed data where BitsStored is lower than BitsAllocated and PixelRepresentation = 1 (signed)
# for example a hitachi mri scan can have BitsAllocated 16 but BitsStored is 12 and HighBit 11
if dicom_slice.BitsAllocated != dicom_slice.BitsStored and \
dicom_slice.HighBit == dicom_slice.BitsStored - 1 and \
dicom_slice.PixelRepresentation == 1:
if dicom_slice.BitsAllocated == 16:
data = data.astype(numpy.int16) # assert that it is a signed type
max_value = pow(2, dicom_slice.HighBit) - 1
invert_value = -1 ^ max_value
data[data > max_value] = numpy.bitwise_or(data[data > max_value], invert_value)
pass
return apply_scaling(data, dicom_slice)
def _is_float(float_value):
"""
Check if a number is actually a float
:type float_value: int or float
:param float_value: number to check
:return True if it is not an integer number
"""
if int(float_value) != float_value:
return True
def get_numpy_type(dicom_header):
"""
Make NumPy format code, e.g. "uint16", "int32" etc
from two pieces of info:
mosaic.PixelRepresentation -- 0 for unsigned, 1 for signed;
mosaic.BitsAllocated -- 8, 16, or 32
:param dicom_header: the read dicom file/headers
:returns: numpy format string
"""
format_string = '%sint%d' % (('u', '')[dicom_header.PixelRepresentation], dicom_header.BitsAllocated)
try:
numpy.dtype(format_string)
except TypeError:
raise TypeError("Data type not understood by NumPy: format='%s', PixelRepresentation=%d, BitsAllocated=%d" %
(format_string, dicom_header.PixelRepresentation, dicom_header.BitsAllocated))
return format_string
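# Example (illustrative header values): PixelRepresentation = 0 with
# BitsAllocated = 16 yields "uint16", while PixelRepresentation = 1 with
# BitsAllocated = 8 yields "int8".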
def get_fd_array_value(tag, count):
"""
Getters for data that also work with implicit transfersyntax
:param count: number of items in the array
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
values = []
for i in range(count):
start = i * 8
stop = (i + 1) * 8
values.append(struct.unpack('d', tag.value[start:stop])[0])
return numpy.array(values)
return tag.value
def get_fd_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('d', tag.value)[0]
return value
return tag.value
def set_fd_value(tag, value):
"""
Setters for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('d', value)
tag.value = value
def get_fl_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('f', tag.value)[0]
return value
return tag.value
def get_is_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
# data is an int formatted as a string, so convert the string first and cast to int
if tag.VR == 'OB' or tag.VR == 'UN':
value = int(tag.value.decode("ascii").replace(" ", ""))
return value
return int(tag.value)
def get_ss_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
# data is a signed short packed in bytes, so unpack it first
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.unpack('h', tag.value)[0]
return value
return tag.value
def set_ss_value(tag, value):
"""
Setter for data that also work with implicit transfersyntax
:param value: the value to set on the tag
:param tag: the tag to read
"""
if tag.VR == 'OB' or tag.VR == 'UN':
value = struct.pack('h', value)
tag.value = value
def apply_scaling(data, dicom_headers):
"""
Rescale the data based on the RescaleSlope and RescaleIntercept
Based on the scaling from pydicomseries
:param dicom_headers: dicom headers to use to retrieve the scaling factors
:param data: the input data
"""
# Apply the rescaling if needed
private_scale_slope_tag = Tag(0x2005, 0x100E)
private_scale_intercept_tag = Tag(0x2005, 0x100D)
if 'RescaleSlope' in dicom_headers or 'RescaleIntercept' in dicom_headers \
or private_scale_slope_tag in dicom_headers or private_scale_intercept_tag in dicom_headers:
rescale_slope = 1
rescale_intercept = 0
if 'RescaleSlope' in dicom_headers:
rescale_slope = dicom_headers.RescaleSlope
if 'RescaleIntercept' in dicom_headers:
rescale_intercept = dicom_headers.RescaleIntercept
# try:
# # this section can sometimes fail due to unknown private fields
# if private_scale_slope_tag in dicom_headers:
# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)
# if private_scale_slope_tag in dicom_headers:
# private_scale_slope = float(dicom_headers[private_scale_slope_tag].value)
# except:
# pass
return do_scaling(data, rescale_slope, rescale_intercept)
else:
return data
def do_scaling(data, rescale_slope, rescale_intercept, private_scale_slope=1.0, private_scale_intercept=0.0):
# Obtain slope and offset
need_floats = False
if int(rescale_slope) != rescale_slope or \
int(rescale_intercept) != rescale_intercept or \
private_scale_slope != 1.0 or \
private_scale_intercept != 0.0:
need_floats = True
if not need_floats:
rescale_slope = int(rescale_slope)
rescale_intercept = int(rescale_intercept)
else:
rescale_slope = float(rescale_slope)
rescale_intercept = float(rescale_intercept)
private_scale_slope = float(private_scale_slope)
private_scale_intercept = float(private_scale_intercept)
# Maybe we need to change the datatype?
if data.dtype in [numpy.float32, numpy.float64]:
pass
elif need_floats:
data = data.astype(numpy.float32)
else:
# Determine required range
minimum_required, maximum_required = data.min(), data.max()
minimum_required = min([minimum_required, minimum_required * rescale_slope + rescale_intercept,
maximum_required * rescale_slope + rescale_intercept])
maximum_required = max([maximum_required, minimum_required * rescale_slope + rescale_intercept,
maximum_required * rescale_slope + rescale_intercept])
# Determine required datatype from that
if minimum_required < 0:
# Signed integer type
maximum_required = max([-minimum_required, maximum_required])
if maximum_required < 2 ** 7:
dtype = numpy.int8
elif maximum_required < 2 ** 15:
dtype = numpy.int16
elif maximum_required < 2 ** 31:
dtype = numpy.int32
else:
dtype = numpy.float32
else:
# Unsigned integer type
if maximum_required < 2 ** 8:
dtype = numpy.uint8
elif maximum_required < 2 ** 16:
dtype = numpy.uint16
elif maximum_required < 2 ** 32:
dtype = numpy.uint32
else:
dtype = numpy.float32
# Change datatype
if dtype != data.dtype:
data = data.astype(dtype)
# Apply rescale_slope and rescale_intercept
# Scaling according to ISMRM2013_PPM_scaling_reminder
# The actual scaling is done the same way as in the next code example
# and https://github.com/fedorov/DICOMPhilipsRescalePlugin/blob/master/DICOMPhilipsRescalePlugin.py
# FOR DEFAULT DATA
# RESULT_DATA = (STORED_VALUE * RESCALE_SLOPE) + RESCALE_INTERCEPT
# FOR PHILIPS DATA
# RESULT_DATA = (STORED_VALUE * PRIVATE_SCALE_SLOPE) + PRIVATE_SCALE_INTERCEPT
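# Worked example (illustrative numbers): a stored value of 100 with
# RescaleSlope = 1 and RescaleIntercept = -1024 maps to 100 * 1 - 1024 = -924.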
if private_scale_slope == 1.0 and private_scale_intercept == 0.0:
data = (data * rescale_slope) + rescale_intercept
else:
data = (data * private_scale_slope) + private_scale_intercept
return data
def write_bvec_file(bvecs, bvec_file):
"""
Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to
"""
if bvec_file is None:
return
logger.info('Saving BVEC file: %s' % bvec_file)
with open(bvec_file, 'w') as text_file:
# Map each direction to a string, join them using a space and write to the file
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 0])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 1])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 2])))
def write_bval_file(bvals, bval_file):
"""
Write an array of bvals to a bval file
:param bvals: array with the values
:param bval_file: filepath to write to
"""
if bval_file is None:
return
logger.info('Saving BVAL file: %s' % bval_file)
with open(bval_file, 'w') as text_file:
# join the bvals using a space and write to the file
text_file.write('%s\n' % ' '.join(map(str, bvals)))
def create_affine(sorted_dicoms):
"""
Function to generate the affine matrix for a dicom series
This method was based on (http://nipy.org/nibabel/dicom/dicom_orientation.html)
:param sorted_dicoms: list with sorted dicom files
"""
# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)
image_orient1 = numpy.array(sorted_dicoms[0].ImageOrientationPatient)[0:3]
image_orient2 = numpy.array(sorted_dicoms[0].ImageOrientationPatient)[3:6]
delta_r = float(sorted_dicoms[0].PixelSpacing[0])
delta_c = float(sorted_dicoms[0].PixelSpacing[1])
image_pos = numpy.array(sorted_dicoms[0].ImagePositionPatient)
last_image_pos = numpy.array(sorted_dicoms[-1].ImagePositionPatient)
if len(sorted_dicoms) == 1:
# Single slice
slice_thickness = 1
if "SliceThickness" in sorted_dicoms[0]:
slice_thickness = sorted_dicoms[0].SliceThickness
step = - numpy.cross(image_orient1, image_orient2) * slice_thickness
else:
step = (image_pos - last_image_pos) / (1 - len(sorted_dicoms))
# check if this is actually a volume and not all slices on the same location
if numpy.linalg.norm(step) == 0.0:
raise ConversionError("NOT_A_VOLUME")
affine = numpy.array(
[[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -step[0], -image_pos[0]],
[-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -step[1], -image_pos[1]],
[image_orient1[2] * delta_c, image_orient2[2] * delta_r, step[2], image_pos[2]],
[0, 0, 0, 1]]
)
return affine, | numpy.linalg.norm(step) | numpy.linalg.norm |
import abc
from collections import defaultdict
import contextlib
import functools
from logging import getLogger
import numpy as np
import tensorflow as tf
logger = getLogger("rl")
def lazy_function(function):
attribute = '_cache_' + function.__name__
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
def purge_orphaned_summaries(summary_writer, step):
summary_writer.add_session_log(
tf.SessionLog(status=tf.SessionLog.START), step)
def read_events(event_filename, data=None, purge_orphaned=True):
data = data or defaultdict(dict)
for e in tf.train.summary_iterator(event_filename):
if purge_orphaned and e.session_log.status == tf.SessionLog.START:
for tag in data.keys():
data[tag] = {
step_time: val
for step_time, val in data[tag].items()
if step_time[0] < e.step
}
for v in e.summary.value:
data[v.tag][(e.step, e.wall_time)] = v.simple_value
return data
def orthogonal_initializer(scale=1.0):
# taken from https://github.com/openai/baselines/tree/master/baselines/ppo2
def _initializer(shape, dtype, partition_info=None):
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError("Not supported shape: {}".format(shape))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = | np.linalg.svd(a, full_matrices=False) | numpy.linalg.svd |
from pvtrace.geometry.geometry import Geometry
from pvtrace.geometry.utils import angle_between, EPS_ZERO
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Sphere(Geometry):
"""A sphere defined by a radius. The sphere centre is always at (0, 0, 0)
in its own coordinate system.
"""
def __init__(self, radius, material=None):
super(Sphere, self).__init__()
self.radius = radius
self._material = material
@property
def material(self):
return self._material
@material.setter
def set_material(self, new_value):
self._material = new_value
def is_on_surface(self, point):
r = np.sqrt(np.sum(np.array(point) ** 2))
return np.abs(r - self.radius) < EPS_ZERO
def contains(self, point):
r = np.sqrt(np.sum(np.array(point) ** 2))
return self.radius - (r + EPS_ZERO) > 0.0
def intersections(self, origin, direction):
# Compute a, b and c coefficients
origin = np.array(origin)
direction = | np.array(direction) | numpy.array |
# This file is a derivative of repeat_copy.py created by SiliconSloth.
# The license header of the original file is retained here.
#
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from collections import OrderedDict
from scipy.sparse import csr_matrix
"""
Generates multiplication task samples.
"""
class MultiplicationTask():
def __init__(self, config):
self.rng = np.random.RandomState(seed=config['seed'])
self.feature_width = config['feature_width']
# self.tokens = {'rep': 'REP'}
self.config = config
self.samples = self.create_samples(config['set_list'], self.feature_width)
def create_samples(self, set_list, feature_width):
print('### MultiplicationTask: create data')
samples = {}
# for set, conf in set_list.items():
# samples[set] = []
# for i in range(conf["quantity"]):
# if conf["min_length"] < conf["max_length"]:
# length = self.rng.randint(conf["min_length"], conf["max_length"])
# else:
# length = conf["min_length"]
# samples[set].append(self.create_sample(length, self.feature_width))
return samples
def create_sample(self, length, feature_width):
# Random number between 0 and the max value, inclusive.
result = self.rng.randint(feature_width**length)
# Choose a factor between 1 and the result, or any valid number if the result is 0.
factor1 = self.rng.randint(result) + 1 if result > 0 else self.rng.randint(feature_width**length)
# Get the second factor by dividing by the first and rounding down.
factor2 = int(result / factor1)
# Compute the new result with the rounded factor.
result = factor1 * factor2
sequence1 = self.int_to_sequence(factor1, length, feature_width)
sequence2 = self.int_to_sequence(factor2, length, feature_width)
answer = self.int_to_sequence(result, length, feature_width)
x_word = np.concatenate([sequence1, [feature_width], sequence2, [feature_width], [0 for _ in range(length)]])
y_word = np.concatenate([[0 for _ in range(length*2 + 2)], answer])
sample = OrderedDict()
sample['x_word'] = x_word
sample['x'] = self._numbers_to_onehot(x_word, feature_width + 1)
sample['y'] = self._numbers_to_onehot(y_word, feature_width)
sample['m'] = np.concatenate([[0 for _ in range(length*2 + 2)], [1 for _ in range(length)]])
return sample
@staticmethod
def int_to_sequence(value, length, feature_width):
seq = np.ndarray(length, dtype=np.int32)
for i in range(length):
seq[i] = value % feature_width
value = int(value / feature_width)
return seq
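# Example (illustrative values): int_to_sequence(6, length=3, feature_width=2)
# returns [0, 1, 1], the base-2 digits of 6 with the least significant digit
# first, padded to the requested length.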
@staticmethod
def _numbers_to_onehot(numbers, size):
length = numbers.__len__()
row = np.arange(length)
data = np.ones(length)
matrix = csr_matrix((data, (row, numbers)), shape=(length, size)).toarray() # super fast
return matrix
@staticmethod
def _zeros_matrix(len, width):
row = np.arange(len)
col = np.zeros(len)
data = np.zeros(len)
padding = csr_matrix((data, (row, col)), shape=(len, width)).toarray()
return padding
def get_sample(self, set_name, number):
conf = self.config['set_list'][set_name]
return self.create_sample(self.rng.randint(conf["min_length"], conf["max_length"]), self.feature_width)
@property
def vocabulary_size(self):
return self.feature_width + 1
@property
def x_size(self):
return self.feature_width + 1
@property
def y_size(self):
return self.feature_width
def sample_amount(self, set_name):
return self.config['set_list'][set_name]['quantity']
def decode_output(self, sample, prediction):
pass
def patch_batch(self, list_of_samples):
batch = {'x': [], 'y': [], 'm': [], 'x_word': []}
len = []
for sample in list_of_samples:
len.append(sample['x'].shape[0])
batch['x_word'].append(sample['x_word'])
max_len = np.max(len)
for sample in list_of_samples:
cur_len = sample['x'].shape[0]
if cur_len < max_len:
add_len = max_len - cur_len
x_add = self._zeros_matrix(add_len, self.x_size)
batch['x'].append(np.concatenate([sample['x'], x_add], axis=0))
y_add = self._zeros_matrix(add_len, self.y_size)
batch['y'].append(np.concatenate([sample['y'], y_add], axis=0))
m_add = np.zeros([add_len])
batch['m'].append(np.concatenate([sample['m'], m_add], axis=0))
else:
for key in ['x', 'y', 'm']:
batch[key].append(sample[key])
for key in ['x', 'y', 'm']:
batch[key] = np.stack(batch[key], axis=0)
batch['x'] = np.transpose(batch['x'], axes=(1, 0, 2))
batch['y'] = np.transpose(batch['y'], axes=(1, 0, 2))
batch['m'] = np.transpose(batch['m'], axes=(1, 0))
return batch
@staticmethod
def decode_output(sample, prediction):
if prediction.shape.__len__() == 3:
prediction_decode_list = []
target_decode_list = []
for b in range(prediction.shape[1]):
target_decode_list.append([np.argmax(sample['y'][i, b, :]) for i in range(sample['y'].shape[0])])
prediction_decode_list.append([np.argmax(prediction[i, b, :]) for i in range(prediction.shape[0])])
return target_decode_list, prediction_decode_list
else:
target_decode = [np.argmax(sample['y'][i, :]) for i in range(sample['y'].shape[0])]
prediction_decode = [ | np.argmax(prediction[i, :]) | numpy.argmax |
import math
import numpy as np
import warnings
from collections import Counter
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score
from sklearn.preprocessing import MinMaxScaler
from .metrics import gap_statistic, silhouette_score_block
from ..utils.os_utl import check_types, filter_kwargs
from ..utils.log_utl import print_progress_bar, printv
"""
GapStatistic estimator is the Miles Granger implementation 2016.04.25.1430 shared in
https://anaconda.org/milesgranger/gap-statistic/notebook
"""
class EstimatorMixin:
def __init__(self, cluster_fn=MiniBatchKMeans, **kwargs):
self.K = 0
self.best_votes = list()
self.cluster = cluster_fn
self.metrics = None
@staticmethod
def _get_best_votes(arr):
k_best_n = arr[arr[:, 1].argsort()[::-1], 0].astype(int)
return k_best_n.tolist()
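# Example (illustrative values): for arr = [[1, 0.2], [2, 0.9], [3, 0.5]] the
# rows are sorted by the score column in descending order, so the returned
# ranking of candidate k values is [2, 3, 1].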
@staticmethod
def _get_max_k(x, max_k):
if max_k > x.shape[0]:
print(f'Can only use number of clusters lower than number of examples ({x.shape[0]}).')
return min(max_k, x.shape[0])
def plot_metric(self, ax=None, show=True, normalise=False, n_votes=3):
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(figsize=(24, 12))
# metric
x, y = self.metrics.T
if normalise:
y = MinMaxScaler().fit_transform(y.reshape(-1, 1))
ax.plot(x, y, label=f'{self.__class__.__qualname__} ({self.best_votes[0]})', linewidth=0.7)
# votes
votes = np.array(self.best_votes[:n_votes])
ax.scatter(votes, y[votes - 1], color=ax.lines[-1].get_color(), alpha=0.3, edgecolors='k',
s=np.array(range(25, (n_votes+1)*15 + 1, 15))[::-1])
if show:
ax.legend()
plt.show()
return
def __repr__(self):
return f'{self.__class__.__module__}.{self.__class__.__qualname__}'
class Riddle(EstimatorMixin):
"""
Riddle K-estimator.
Estimates the correct value for K using the reciprocal delta log rule.
"""
@check_types(x=np.ndarray, max_k=int)
def fit(self, x, max_k=50, **kwargs):
"""
Directly fit this estimator to original data considering max_k clusters.
Kwargs passed are not used. They are included for estimators compatibility only
:param np.ndarray x: Array with the data to cluster
:param int max_k: Maximum number of clusters to try to find
:param kwargs:
:return int: Best number of clusters
"""
# calculate s_k
max_k = self._get_max_k(x, max_k)
s_k = np.array([self.cluster(k).fit(x).inertia_ for k in range(1, max_k + 1)])
return self.fit_s_k(s_k)
@check_types(s_k=(np.ndarray, list, tuple))
def fit_s_k(self, s_k, **kwargs):
"""
Fit the estimator to the distances of each datapoint to assigned cluster centroid
Kwargs passed are not used. They are included for estimators compatibility only
:param np.ndarray|list|tuple s_k: Collection of inertias_ for each n_cluster explored
:param kwargs:
:return int: Best number of clusters
"""
if isinstance(s_k, (list, tuple)):
s_k = np.array(s_k)
r_k = 1/s_k
n_cl = range(1, len(r_k) + 1)
diff = np.pad( | np.diff(r_k) | numpy.diff |
from __future__ import print_function
import numpy as np
import sys
import math
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_normal, xavier_uniform
from plyfile import (PlyData, PlyElement)
# --------------------------------
# MESH IO
# --------------------------------
def load_ply_data(filename):
""" read ply file, only vertices and faces """
plydata = PlyData.read(filename)
vertices = plydata['vertex'].data[:]
vertices = np.array([[x, y, z] for x,y,z in vertices])
# inputs are all triangle meshes
faces = plydata['face'].data['vertex_indices'][:]
faces = np.array([[f1, f2, f3] for f1,f2,f3 in faces])
return vertices, faces
def save_ply_data(filename, vertex, face):
""" save ply file, only vertices and faces """
vertices = np.zeros(vertex.shape[0], dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
for i in range(vertex.shape[0]):
vertices[i] = (vertex[i][0], vertex[i][1], vertex[i][2])
# print(vertex, vertex.dtype)
faces = np.zeros(face.shape[0], dtype=[('vertex_indices', 'i4', (3,))])
for i in range(face.shape[0]):
faces[i] = ([face[i][0], face[i][1], face[i][2]])
# print(faces.shape, faces.dtype)
e1 = PlyElement.describe(vertices, 'vertex')
e2 = PlyElement.describe(faces, 'face')
PlyData([e1, e2], text=True).write(filename)
print('file saved')
def load_obj_data(filename):
"""
A simple obj reader which reads vertices and faces only.
i.e. lines starting with v and f only
"""
mesh = {}
ver =[]
fac = []
if not filename.endswith('obj'):
sys.exit('the input file is not a obj file')
with open(filename) as f:
for line in f:
if line.strip():
inp = line.split()
if(inp[0]=='v'):
ver.append([float(inp[1]), float(inp[2]), float(inp[3])])
elif(inp[0]=='f'):
fac.append([float(inp[1]), float(inp[2]), float(inp[3])])
V = np.array(ver)
F = np.array(fac)
return V, F
# --------------------------------
# Mesh Utils
# --------------------------------
def jitter_vertices(vertices, sigma=0.01, clip=0.05):
""" Randomly jitter points. jittering is per point.
Input:
Nx3 array, original shape
Output:
Nx3 array, jittered shape
"""
N, C = vertices.shape
assert(clip > 0)
jittered_data = np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
jittered_data += vertices
return jittered_data
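# Example (illustrative values): with the defaults sigma=0.01 and clip=0.05,
# every coordinate is shifted by zero-mean Gaussian noise truncated to
# +/- 0.05, so jitter_vertices(np.zeros((4, 3))) stays within [-0.05, 0.05].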
def rotate_vertices(vertices):
""" Randomly rotate the points to augument the dataset
rotation is per shape based along up direction
Input:
Nx3 array, input shape
Output:
Nx3 array, rotated shape
"""
rotated_data = np.zeros(vertices.shape, dtype=np.float32)
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
rotated_data = np.dot(vertices, rotation_matrix)
return rotated_data
def rotate_vertices_by_angle(vertices, rotation_angle):
""" Randomly rotate the points by rotation_angle to augument the dataset
rotation is per shape based along up direction
Input:
Nx3 array, input shape
rotation_angle in radians
Output:
Nx3 array, rotated shape
"""
rotated_data = np.zeros(vertices.shape, dtype=np.float32)
# rotation_angle = np.random.uniform() * 2 * np.pi
cosval = | np.cos(rotation_angle) | numpy.cos |
#Reader for the coco panoptic data set for pointer based image segmentation
import numpy as np
import os
import scipy.misc as misc
import random
import cv2
import json
import threading
############################################################################################################
def rgb2id(color): # Convert annotation map from 3 channel RGB to instance
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.uint32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return color[0] + 256 * color[1] + 256 * 256 * color[2]
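# Example (illustrative colour triple): rgb2id((10, 2, 1)) returns
# 10 + 256*2 + 256*256*1 = 66058, the panoptic segment id encoded by that RGB value.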
#########################################################################################################################
#########################################################################################################################
class Reader:
# Initiate reader and define the main parameters for the data reader
def __init__(self, ImageDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/train2017",AnnotationDir="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/COCO_panoptic/panoptic_train2017/panoptic_train2017", DataFile="/media/sagi/9be0bc81-09a7-43be-856a-45a5ab241d90/Data_zoo/COCO/COCO_panoptic/panoptic_train2017.json",MaxBatchSize=100,MinSize=250,MaxSize=800,MaxPixels=800*800*5, AnnotationFileType="png", ImageFileType="jpg",UnlabeledTag=0,Suffle=True,TrainingMode=True):
self.ImageDir=ImageDir # Image dir
self.AnnotationDir=AnnotationDir # File containing image annotation
self.MaxBatchSize=MaxBatchSize # Max number of images in a batch
self.MinSize=MinSize # Min image width and height in pixels
self.MaxSize=MaxSize #Max image width and height in pixels
self.MaxPixels=MaxPixels # Max number of pixels in the whole batch (reduce to solve oom/out of memory issues)
self.AnnotationFileType=AnnotationFileType # What is the type (ending) of the annotation files
self.ImageFileType=ImageFileType # What is the type (ending) of the image files
self.DataFile=DataFile # Json file that contains data on the annotation of each image
self.UnlabeledTag=UnlabeledTag # Value of unlabeled region in the annotation map (usually 0)
self.ReadStuff = True # Read things that are not instace object (like sky or grass)
self.SplitThings = False#True # Split instance of things (object) to connected component region and use each connected region as an instance
self.SplitStuff = True # Split instance of things (object) to connected component region and use each connected region as instance
self.SplitCrowd = True # Split areas marked as Crowds using connected componennt
self.IgnoreCrowds = True # Ignore areas marked as crowd
self.PickBySize = True # Pick instances of with probablity proportional to their sizes
self.StuffAreaFactor=0.225 # Since we pick segments according to their size, stuff segments (ground, sky) have a higher probability of being chosen compared to things (objects); this factor balances that
self.MinSegSize=100 # Ignore segments which are smaller than this size in pixels
self.Epoch = 0 # Training Epoch
self.itr = 0 # Training iteration
self.suffle=Suffle # Suffle list of file
#........................Read data file................................................................................................................
with open(DataFile) as json_file:
self.AnnData=json.load(json_file)
#-------------------Get All files in folder--------------------------------------------------------------------------------------
self.FileList=[]
for FileName in os.listdir(AnnotationDir):
if AnnotationFileType in FileName:
self.FileList.append(FileName)
if self.suffle:
random.shuffle(self.FileList)
if TrainingMode: self.StartLoadBatch()
##############################################################################################################################################
#Get annotation data for a specific image from the json file
def GetAnnnotationData(self, AnnFileName):
for item in self.AnnData['annotations']: # Get Annotation Data
if (item["file_name"] == AnnFileName):
return(item['segments_info'])
############################################################################################################################################
#Get information for a specific category/Class id
def GetCategoryData(self,ID):
for item in self.AnnData['categories']:
if item["id"]==ID:
return item["name"],item["isthing"]
##########################################################################################################################################3333
#Split binary mask correspond to a singele segment into connected components
def GetConnectedSegment(self, Seg):
[NumCCmp, CCmpMask, CCompBB, CCmpCntr] = cv2.connectedComponentsWithStats(Seg.astype(np.uint8)) # apply connected component
Mask=np.zeros([NumCCmp,Seg.shape[0],Seg.shape[1]],dtype=bool)
BBox=np.zeros([NumCCmp,4])
Sz=np.zeros([NumCCmp],np.uint32)
for i in range(1,NumCCmp):
Mask[i-1] = (CCmpMask == i)
BBox[i-1] = CCompBB[i][:4]
Sz[i-1] = CCompBB[i][4] #segment Size
return Mask,BBox,Sz,NumCCmp-1
#################################################################################################################################################
# Pick and return random segment from the list and remove it from the segment list
def PickRandomSegment(self,Sgs,SumAreas):
if self.PickBySize: # Pick random segment with probability proportional to size
r = np.random.randint(SumAreas) + 1
TotAreas=0
for ind in range(Sgs.__len__()):
TotAreas+=Sgs[ind]['Area']
if TotAreas>=r:
break
else: ind=np.random.randint(len(Sgs)) #Pick Random segment with equal probability
# print("ind" + str(ind))
SelectedSg=Sgs.pop(ind)
SumAreas-=SelectedSg["Area"]
return SelectedSg,SumAreas
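# Note (illustrative example): with PickBySize enabled and segment areas
# [100, 300], the uniform draw r lies in [1, 400]; the first segment is chosen
# when r <= 100 and the second otherwise, i.e. with probability proportional to area.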
##########################################################################################################################
# Pick a set of segments from the list and generate a random ROI map by taking the inverse of the region defined by these segments' area
def GenerateRandomROIMask(self, Sgs, SumAreas):
ROI = np.ones(Sgs[0]["Mask"].shape)
if SumAreas<=0 and np.random.randint(6)==0: return ROI
r = np.random.randint(SumAreas) + 1
while (SumAreas>r):
SumAreasOld=SumAreas
SelectedSg, SumAreas=self.PickRandomSegment( Sgs, SumAreas)
# misc.imshow(SelectedSg["Mask"].astype(float))
if SumAreas>r:
ROI[SelectedSg["Mask"]]=0
# misc.imshow(ROI.astype(float))
else:
if np.random.randint(SumAreas,SumAreasOld)>r:# and (SumAreas>1000):
ROI[SelectedSg["Mask"]] = 0
else:
Sgs.append(SelectedSg)
return(ROI)
#############################################################################################################################
############################################################################################################################
#Pick random point from segment given as a binary mask
def PickRandomPointInSegment(self,Seg,ErodeMask=10):
x0 = int(np.floor(Seg["BBox"][0])) # Bounding box x position
Wbox = int(np.floor(Seg["BBox"][2])) # Bounding box width
y0 = int(np.floor(Seg["BBox"][1])) # Bounding box y position
Hbox = int(np.floor(Seg["BBox"][3])) # Bounding box height
if ErodeMask:
Msk = cv2.erode(Seg["Mask"].astype(np.uint8), np.ones((3, 3), np.uint8), iterations=ErodeMask)
if Msk.sum()==0: Msk=Seg["Mask"]
else:
Msk = Seg["Mask"]
while(True):
x = np.random.randint(Wbox) + x0
y = np.random.randint(Hbox) + y0
if (Msk[y,x])==1:
return x,y
##############################################################################################################################
# Display loaded data on screen (for debugging)
def DisplayTrainExample(self,Img2,ROI2,Segment2,SelectedPoint2):
Img=Img2.copy()
ROI=ROI2.copy()
Segment=Segment2.copy()
SelectedPoint=SelectedPoint2.copy()
misc.imshow(Img)
SelectedPoint = cv2.dilate(SelectedPoint.astype(np.uint8), np.ones((3, 3), np.uint8), iterations=1)
Img[SelectedPoint][:]=[255,0,0]
Img[:, :, 0] = SelectedPoint.astype(np.uint8)*255+ (1-SelectedPoint.astype(np.uint8))*Img[:, :, 0]
Img[:, :, 1] *= 1-SelectedPoint.astype(np.uint8)
Img[:, :, 2] *= 1-SelectedPoint.astype(np.uint8)
Img[ :, :, 0] *= 1-(ROI.astype(np.uint8)-Segment.astype(np.uint8))
#Img[:, :, 1] += ROI.astype(np.uint8)*40
Img[ :, :, 2] *= 1 - Segment.astype(np.uint8)
# misc.imshow(Img)
#print(ROI.mean())
ROI[0,0]=0
misc.imshow(ROI.astype(float))
misc.imshow( Segment.astype(float))
misc.imshow(SelectedPoint.astype(float))
misc.imshow(Img)
#############################################################################################################################
# Crop and resize image, mask and ROI to fit the batch size
def CropResize(self,Img, Mask,bbox,ROImask,Px,Py,Hb,Wb):
# ========================resize image if it is too small for the batch size==================================================================================
[h, w, d] = Img.shape
Rs = | np.max((Hb / h, Wb / w)) | numpy.max |
""" Port of Andy's Matlab scripts """
import numpy as np
from IPython import embed
#%
#% This script plots the solution for the 2-layer solution of the
#% ventilated thermocline equation of LPS. The script follows
#% Pedlosky (1996, Ocean Circulation Theory), section 4.4.
#%
#% The x-coordinate is longitude in radians, and the y-coordinate
#% is f/f0, starting at the equator.
#%
#%
#%
#%
#%
#%
#%
#%
#%%%%%%%%%%%%%%% DO NOT EDIT THE FILE BELOW HERE %%%%%%%%
#%
def two_layers(theta0=60.,theta2=50., rho1=1025.50,rho2=1026.75,
Lx=5e6, W0=2e-6, H2=400, max_depth=-1200):
"""[summary]
Args:
theta0 ([type], optional): [description]. Defaults to 60..
theta2 ([type], optional): [description]. Defaults to 50..
rho1 (float, optional): [description]. Defaults to 1025.50.
rho2 (float, optional): [description]. Defaults to 1026.75.
Lx (width of the box in m, optional): [description]. Defaults to 5e6.
W0 ([type], optional): [description]. Defaults to 2e-6.
H2 (int, optional): [description]. Defaults to 400.
max_depth (int, optional): [description]. Defaults to -1200.
#% Specify the layer densities of the active upper two layers (kg/(m*m*m)).
#% Northern most extent of the model domain (degrees).
#% Latitude of the outcrop line for layer 2 (degrees).
#% Width of the domain (m).
#% Amplitude of the Ekman pumping velocity (m/s).
#% Depth of layer 2 along the eastern boundary (m).
#% NOTE:
#% Define max plotting depth (m). This parameter controls the maximum value
#% plotted on the depth axis. You may need to adjust this if in some of your
#% calculations your layer depths exceed the value of -1200m prescribed here.
"""
g=9.81
rho3=1027.50
#%
# Layer 1 reduced gravity.
gamma1=(rho2-rho1)*g/rho3
# Layer 2 reduced gravity.
gamma2=(rho3-rho2)*g/rho3
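    # Worked example with the default densities (illustration, not in the original script):
    # gamma1 = (1026.75 - 1025.50) * 9.81 / 1027.50 ~ 1.19e-2 m/s^2
    # gamma2 = (1027.50 - 1026.75) * 9.81 / 1027.50 ~ 7.16e-3 m/s^2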
#
# Define grid.
#
im=201
jm=201
# Position of y-transect for plotting.
xtrans=Lx/2
# Position of x-transect for plotting.
ytrans=theta0/2
# Earth radius.
eradius=6.371e6
# Angular rotation rate of earth.
Omega=7.292e-5
theta0=theta0*2*np.pi/360
theta2=theta2*2*np.pi/360
f0=2*Omega*np.sin(theta0)
f2=2*Omega*np.sin(theta2)
# Latitude grid-spacing.
dtheta=theta0/(jm-1)
# Longitude grid-spacing.
dx=Lx/(im-1)
dphi=dx/eradius
phie=(im-1)*dphi
#
# Coordinate arrays for plotting.
xarr=np.zeros((im,jm))
yarr=np.zeros((jm,jm))
for i in range(im): #=1:im
xarr[i,:]=i*dphi*eradius/1000
for j in range(jm): #1:jm
yarr[:,j]=j*dtheta*eradius/1000
#embed(header='88 of vt')
#
# Coriolis parameter.
#
#for j=1:jm
theta= np.arange(jm)*dtheta
f=2*Omega*np.sin(theta)
#
# Ekman pumping - Pedlosky eqn 4.4.25.
#
we=np.zeros((im,jm))
for j in range(jm): #1:jm
we[:,j]=-W0*f0*f0*np.sin(np.pi*f[j]/f0)/(f[j]*f[j])
#
# D0^2 from Pedlosky eqn 4.4.26 but NOT using the H2 scaling,
# but instead using the actual factor from 4.4.5 so that H2,
# W0, gamma2, phie and theta0 can be variable parameters.
#
D02=np.zeros((im,jm))
D0fact=4*eradius*eradius*W0*Omega*np.sin(
theta0)*np.sin(theta0)*phie/gamma2
for j in range(jm): #1:jm
for i in range(im): #=1:im
phi=i*dphi
D02[i,j]=D0fact*(1-phi/phie)*np.sin(np.pi*f[j]/f0)
#
# Single layer region f0 <= f <= f2, Pedlosky eqn 4.4.6.
#
# h2(i,j)=sqrt(D02(i,j)+H2*H2);
# h(i,j)=h2(i,j);
h2 = np.sqrt(D02+H2*H2)
h = h2.copy()
#
# Process of subduction, f2 < f <= 0..
#
# Pedlosky eqn 4.4.18, where h=h1+h2.
#
#for j=1:jm
#if f(j) <= f2
# for i=1:im
# h(i,j)=sqrt((D02(i,j)+H2*H2)/(1+gamma1*(1-f(j)/f2)^2/gamma2));
# end
gdf = f <= f2
for j in np.where(gdf)[0]:
for i in range(im): #=1:im
h[i,j]=np.sqrt((D02[i,j]+H2*H2)/(
1+gamma1*(1-f[j]/f2)**2/gamma2))
#
# Pedlosky eqn 4.4.14a,b.
#
h1=np.zeros((im,jm))
for j in np.where(gdf)[0]:
for i in range(im): #=1:im
h1[i,j] = (1-f[j]/f2)*h[i,j]
h2[i,j] = f[j]*h[i,j]/f2
#
# The shadow zone.
# The latitude and longitude of the streamline that defines the
# poleward edge of the shadow zone can be computed by equating
# Pedlosky eqn 4.4.26 and 4.4.22.
# Namely:
# phi=phie*(1-fac*gamma1*(1-f/f2)*(1-f/f2)*H2*H2/gamma2)
# where fac=1/(D0fact*sin(pi*f/f0)).
#
#shadx=ones(jm,1)*phie*eradius/1000;
#shady=zeros(jm,1);
#for j=jm:-1:1
shadx = np.ones(jm)*phie*eradius/1000
shady = np.zeros_like(shadx)
gdj = np.where(gdf)[0]
phi=np.arange(im)*dphi
for j in range(jm-1,-1,-1):
shady[j]=j*dtheta*eradius/1000
if j in gdj:
fac=1/(D0fact*np.sin(np.pi*f[j]/f0))
phi_shadow=phie*(1-fac*gamma1*(1-f[j]/f2)**2*H2*H2/gamma2)
shadx[j]=phi_shadow*eradius/1000
#if j == 0:
# import pdb; pdb.set_trace()
gdphi = phi >= phi_shadow
for i in np.where(gdphi)[0]:
h[i,j]=H2
h1[i,j]=np.sqrt(gamma2*D02[i,j]/gamma1)
h2[i,j]=h[i,j]-h1[i,j]
#import pdb; pdb.set_trace()
#
# The western pool region.
# The latitude and longitude of the streamline that defines the
# eastern edge of the pool region can be found by equating Pedlosky
# eqn 4.6.2 and 4.4.26. It is assumed that the PV is homogenized in the
# pool region which yields Pedlosky eqn 4.6.6 for h and 4.6.5 for h2 in the pool
# in which case h1=h-h2.
# Namely:
# phi=phie*(1-fac*(D02w*(1+gamma1*(1-f/f2)^2/gamma2)/(2*H2^2)
# +gamma1*(f-f/f2)^2/(2*gamma2))
# where fac=1/(D0fact*sin(pi*f/f0)), and D02w is the value of D02 evaluated
# at (0,theta2)..
#
D02w=D0fact*np.sin(np.pi*f2/f0)
Gamma12=gamma1/gamma2
hw=np.sqrt(D02w+H2*H2)
pooly=np.arange(jm)*dtheta*eradius/1000
poolx= np.zeros_like(pooly)
# Tricky one!
phi= np.arange(im)*dphi
for j in np.flip(np.where(gdf)[0]):
fac=1/(D0fact*np.sin(np.pi*f[j]/f0))
fac1=Gamma12*(1-f[j]/f2)**2
phi_pool=phie*(1-fac*(D02w*(1+fac1)+H2*H2*fac1))
poolx[j] = max(phi_pool*eradius/1000, 0.)
gdphi = phi <= phi_pool
for i in np.where(gdphi)[0]:
h[i,j]=Gamma12*f[j]*hw/(
f2*(1+Gamma12))+np.sqrt(
(D02[i,j]+H2*H2)*(
1+Gamma12)-Gamma12*(
f[j]*hw/f2)**2)/(1+Gamma12)
h1[i,j]= h[i,j] - f[j]*hw/f2
#if (i == 10) and (j==10):
# import pdb; pdb.set_trace()
#embed(header='211 of vt')
#
psi1=np.nan*np.ones((im,jm))
psi2=np.nan*np.ones((im,jm))
hp1=h1.copy()
ps=shadx*1000/eradius
    phi = np.arange(im)
import time
import copy
import sys
import json
import os
import open3d as o3d
import numpy as np
import airsim
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import yaml
from airsim import ImageRequest, ImageResponse, LidarData
from airsimcollect.helper.helper import update_airsim_settings
from airsimcollect.helper.helper_transforms import parse_lidarData
from airsimcollect.helper.o3d_util import get_extrinsics, set_view, handle_shapes, update_point_cloud, translate_meshes
from airsimcollect.helper.helper_mesh import (
create_meshes_cuda, update_open_3d_mesh_from_tri_mesh, decimate_column_opc, get_planar_point_density, map_pd_to_decimate_kernel)
from airsimcollect.helper.helper_polylidar import extract_all_dominant_plane_normals, extract_planes_and_polygons_from_mesh
from airsimcollect.segmentation import DEFAULT_REGEX_CODES, set_segmentation_ids
from fastga import GaussianAccumulatorS2Beta, GaussianAccumulatorS2, IcoCharts
from polylidar import MatrixDouble, extract_tri_mesh_from_organized_point_cloud, HalfEdgeTriangulation, Polylidar3D
# Lidar Point Cloud Image
lidar_beams = 64
def set_up_airsim(client):
# connect to the AirSim simulator
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
print("Taking off!")
client.takeoffAsync(timeout_sec=3).join()
print("Increasing altitude 10 meters!")
client.moveToZAsync(-10, 2).join()
print("Reached Altitude, launching Lidar Visualizer")
return client
def get_lidar_data(client: airsim.MultirotorClient):
data:LidarData = client.getLidarData()
points = parse_lidarData(data)
lidar_meta = dict(position=data.pose.position, rotation=data.pose.orientation)
return points, lidar_meta
def get_image_data(client: airsim.MultirotorClient):
response:ImageResponse = client.simGetImage("0", airsim.ImageType.Segmentation)
img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)
# TODO shape should be tuple
img_rgba = img1d.reshape(
response.height, response.width, 4)
plt.imshow(img_rgba)
def update_view(vis):
extrinsics = get_extrinsics(vis)
vis.reset_view_point(True)
set_view(vis, extrinsics)
def main():
# Load yaml file
with open('./assets/config/PolylidarParams.yaml') as file:
try:
config = yaml.safe_load(file)
except yaml.YAMLError as exc:
print("Error parsing yaml")
client = airsim.MultirotorClient()
set_segmentation_ids(client, DEFAULT_REGEX_CODES)
air_sim_settings = update_airsim_settings()
z_col = air_sim_settings['lidar_z_col']
set_up_airsim(client)
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Info)
pcd = o3d.geometry.PointCloud()
# pcd_pd = o3d.geometry.PointCloud()
mesh_noisy = o3d.geometry.TriangleMesh()
mesh_smooth = o3d.geometry.TriangleMesh()
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5)
vis = o3d.visualization.Visualizer()
vis.create_window(width=1280, height=720)
vis.add_geometry(pcd)
# vis.add_geometry(pcd_pd)
vis.add_geometry(mesh_smooth)
vis.add_geometry(axis)
all_polys = []
pl = Polylidar3D(**config['polylidar'])
ga = GaussianAccumulatorS2Beta(level=config['fastga']['level'])
ico = IcoCharts(level=config['fastga']['level'])
path = [airsim.Vector3r(-10, -10, -10), airsim.Vector3r(10, -10, -15), airsim.Vector3r(10, 10, -10), airsim.Vector3r(-10, 10, -15)] * 4
path = [airsim.Vector3r(-5, -5, -10), airsim.Vector3r(5, -5, -15), airsim.Vector3r(5, 5, -10), airsim.Vector3r(-5, 5, -15)] * 4
client.moveOnPathAsync(path, 2.5, 60)
# get_image_data(client)
drone = o3d.io.read_triangle_mesh('assets/stl/drone.ply')
rot = o3d.geometry.get_rotation_matrix_from_xyz([np.pi/2, 0, 0])
drone = drone.rotate(rot, drone.get_center())
drone.paint_uniform_color([1, 0, 0])
drone.compute_triangle_normals()
drone.compute_vertex_normals()
vis.add_geometry(drone)
prev_time = time.time()
with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Info):
while True:
if time.time() - prev_time > 0.1:
points, lidar_meta = get_lidar_data(client)
# print(f"Full Point Cloud Size (including NaNs): {points.shape}")
                if np.count_nonzero(~np.isnan(points)):
import itertools
import numpy as np
from munch import Munch
from plaster.tools.image import imops
from plaster.tools.image.coord import XY
from plaster.tools.ipynb_helpers import displays
from plaster.tools.schema import check
from plaster.tools.utils import stats, utils
from plaster.tools.zplots.zplots import ZPlots
import logging
log = logging.getLogger(__name__)
# Mature
# -------------------------------------------------------------------------------------
def plot_psfs(psfs, scale=1.0, **kwargs):
"""
Show regional PSF in a summary image.
Arguments:
psfs is 4-d array: (regional divs y, regional divs x, psf h, psf w)
"""
divs_h, divs_w, dim_h, dim_w = psfs.shape
assert divs_h == divs_w
divs = divs_h
assert dim_h == dim_w
dim = dim_h
z = ZPlots.zplot_singleton
with z(_size=kwargs.get("_size", max(100, int(dim * divs * scale)))):
        comp = np.zeros((divs * dim, divs * dim))
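        # A minimal sketch (assumption, not the original body) of one way the
        # (divs*dim, divs*dim) mosaic could be assembled: each regional PSF is tiled
        # into its grid cell. The final ZPlots display call is omitted because its
        # exact API is not shown here.
        for yi in range(divs):
            for xi in range(divs):
                comp[yi * dim:(yi + 1) * dim, xi * dim:(xi + 1) * dim] = psfs[yi, xi]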
from datetime import date, timedelta, datetime
from calendar import monthrange
from random import uniform
import logging
import json
import pandas as pd
import numpy as np
import networkx as nx
import os
import random
from scipy.stats import skewnorm
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from Airport import Airport
from Airline import Airline
from Airplane import Airplane
from Country import Country
logger = logging.getLogger(__name__)
class Environment:
PASSENGER_VARIABILITY_PERCENTAGE_PER_DAY = 5
PASSENGER_PERSONAL_BOOKING_SKEW = 0.2 # Negative values are left skewed, positive values are right skewed.
PASSENGER_BUSINESS_BOOKING_SKEW = 8 # Negative values are left skewed, positive values are right skewed.
PASSENGER_PERSONAL_PROBABILITY = 0.5 # Split for personal passengers
DAYS_BEFORE_BOOKING_START = 365
def __init__(self, today=date.today(), world_loc=None, route_loc=None, flow_loc=None):
self.date = today
self.countries = {}
self.airports = {}
self.airlines = {}
self.airplanes = {}
self.distance_graph = {}
self.route_graph = nx.MultiGraph()
self.route_df = pd.DataFrame()
self.passenger_flow_df = pd.DataFrame()
self.passenger_flow_monthly_sum = []
self.passenger_flow_airport_list = []
self.passenger_flow_monthly_weights = []
logger.info("Started initializing ENVIRONMENT...")
if world_loc is not None and route_loc is not None:
self.init_env(world_loc)
self.init_routes(route_loc)
self.init_passenger_data(flow_loc)
else:
logger.error("Environment data file not found")
raise NotImplemented
logger.info("Finished initializing ENVIRONMENT.")
def init_passenger_data(self, file_location):
logger.info("Started initializing passenger demand data...")
self.passenger_flow_df = pd.read_csv(file_location, index_col=0, header=0).astype('int32')
self.passenger_flow_monthly_sum = list(self.passenger_flow_df.sum())
self.passenger_flow_airport_list = list(self.passenger_flow_df.index)
self.passenger_flow_monthly_weights = pd.DataFrame()
for month in range(1, 12 + 1):
self.passenger_flow_monthly_weights[str(month)] = self.passenger_flow_df[str(month)] / \
self.passenger_flow_monthly_sum[month - 1] * 100
logger.info("Finished initializing passenger demand data.")
def init_routes(self, file_location):
logger.info("Started initializing world Routes...")
self.route_df = pd.read_csv(file_location, index_col=0, header=0)
self.route_graph = nx.from_pandas_edgelist(self.route_df, 'source', 'target', 'Distance_Km')
logger.info("Finished initializing world Routes.")
def init_env(self, file_location):
logger.info("Started initializing world...")
world = {}
with open(file_location) as world_json:
world = json.load(world_json)
for airport in world["airports"]:
airport_data = world["airports"][airport]
new_airport = Airport(iata=airport_data["iata"],
name=airport_data["name"],
city=airport_data["city"],
country=airport_data["country"],
lat=airport_data["lat"],
long=airport_data["long"])
self.airports[airport] = new_airport
logger.info("Finished initializing world airports.")
for country in world["countries"]:
new_country = Country(country)
for airport in world["countries"][country]["airports"]:
new_country.add_airport(airport, self.airports[airport])
for airline in world["countries"][country]["airlines"]:
airline_data = world["countries"][country]["airlines"][airline]
new_airline = Airline(name=airline_data["name"],
country=country)
self.airlines[airline] = new_airline
new_country.add_airline(airline, new_airline)
for airplane in world["countries"][country]["airlines"][airline]["airplanes"]:
airplane_data = world["countries"][country]["airlines"][airline]["airplanes"][airplane]
new_airplane = Airplane(identifier=airplane,
airline=airline,
start_airport=airplane_data["source_airport"],
end_airport=airplane_data["destination_airport"],
distance=airplane_data["distance"])
new_airline.add_airplane(airplane, new_airplane)
airplane_tuple = (airplane_data["source_airport"], airplane_data["destination_airport"])
if airplane_tuple not in self.airplanes:
self.airplanes[airplane_tuple] = {airplane: new_airplane}
else:
self.airplanes[airplane_tuple][airplane] = new_airplane
self.countries[country] = new_country
logger.info("Finished initializing world country data.")
logger.info("Finished initializing world.")
def get_demand(self):
if self.date.month in [1, 7, 8, 9, 12]:
return uniform(0.8, 1)
if self.date.month in [4, 5, 6, 10, 11]:
return uniform(0.3, 0.8)
if self.date.month in [2, 3]:
return uniform(0.1, 0.3)
def increment_ticker(self):
self.date += timedelta(1)
def get_month(self):
return self.date.month
def get_number_of_passenger_today(self):
month = self.get_month()
return self.passenger_flow_monthly_sum[month - 1]
def get_transit_airports(self):
return self.passenger_flow_airport_list
def get_transit_airports_weights(self):
month = str(self.get_month())
return self.passenger_flow_monthly_weights[month]
def get_random_path(self):
airports = self.get_transit_airports()
airports_weight = self.get_transit_airports_weights()
return random.choices(airports, weights=airports_weight, k=2)
def generate_passenger_path(self):
path = self.get_random_path()
while path[0] == path[1]:
path = self.get_random_path()
return path
# def do__(self):
# path = self.get_random_path()
# comp_path = nx.dijkstra_path(self.route_graph, source=path[0], target=path[1])
# print(comp_path, path)
@staticmethod
def get_skewed_data(skew, max_value, size):
random_skew = skewnorm.rvs(a=skew, loc=max_value, size=size)
if size != 0:
random_skew -= min(random_skew)
random_skew /= max(random_skew)
random_skew *= random_skew
# plt.hist(random_skew, 365, color='red', alpha=0.1)
# plt.show()
return random_skew
def build_passenger_booking_pattern(self, number_of_days):
timestamp = datetime.now().timestamp()
logger.info("Started building passenger booking pattern...")
day_data = []
normalised_max_value = 1
total_passenger_count = 0
logger.info("Started creating passenger source and destination airport...")
day = 0
month = 1
while day < number_of_days:
passenger_count = self.get_number_of_passenger_today()
for passenger_index in range(passenger_count):
path = self.generate_passenger_path()
path.append(day + 1)
day_data.append(path)
total_passenger_count += passenger_count
logger.info(f"Finished passenger path for day: {day + 1}")
month_end_day = monthrange(self.date.year, self.date.month)[1]
if month_end_day == self.date.day:
logger.info(f"Started saving passenger data for month {month}...")
personal_passenger_count = round(total_passenger_count * self.PASSENGER_PERSONAL_PROBABILITY)
business_passenger_count = total_passenger_count - personal_passenger_count
personal_passenger_skew_booking_day = self.get_skewed_data(skew=self.PASSENGER_PERSONAL_BOOKING_SKEW,
max_value=normalised_max_value,
size=personal_passenger_count)
business_passenger_skew_booking_day = self.get_skewed_data(skew=self.PASSENGER_BUSINESS_BOOKING_SKEW,
max_value=normalised_max_value,
size=business_passenger_count)
prebooked_days_norm = np.append(personal_passenger_skew_booking_day,
business_passenger_skew_booking_day)
is_personal = np.append(np.ones((1, personal_passenger_count)), np.zeros((1, business_passenger_count)))
                month_array = np.full(shape=personal_passenger_count+business_passenger_count, fill_value=month, dtype=np.int)
# basic dependencies
import os
import sys
import subprocess
from glob import glob
import math
# main dependencies: numpy, nibabel
import numpy
import nibabel
# nighresjava and nighres functions
import nighresjava
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving, \
_check_topology_lut_dir
# convenience labels
X=0
Y=1
Z=2
T=3
def surface_antsreg(source_surface, target_surface,
max_dist=10.0,
run_rigid=True,
rigid_iterations=1000,
run_affine=True,
affine_iterations=1000,
run_syn=True,
coarse_iterations=100,
medium_iterations=70, fine_iterations=20,
cost_function='Demons',
interpolation='Linear',
regularization='Low',
convergence=1e-6,
mask_zero=False,
crop=True,
ignore_affine=False, ignore_header=False,
save_data=False, overwrite=False, output_dir=None,
file_name=None):
""" Embedded ANTS Registration for surfaces
Runs the rigid and/or Symmetric Normalization (SyN) algorithm of ANTs and
formats the output deformations into voxel coordinate mappings as used in
CBSTools registration and transformation routines. Uses all input contrasts
with equal weights.
Parameters
----------
source_surface: niimg
Levelset surface image to register
target_surface: niimg
Reference levelset surface image to match
run_rigid: bool
        Whether or not to run a rigid registration first (default is True)
rigid_iterations: float
Number of iterations in the rigid step (default is 1000)
run_affine: bool
        Whether or not to run an affine registration first (default is True)
affine_iterations: float
Number of iterations in the affine step (default is 1000)
run_syn: bool
Whether or not to run a SyN registration (default is True)
coarse_iterations: float
        Number of iterations at the coarse level (default is 100)
    medium_iterations: float
        Number of iterations at the medium level (default is 70)
    fine_iterations: float
        Number of iterations at the fine level (default is 20)
cost_function: {'LeastSquares', 'Demons'}
Cost function for the registration (default is 'Demons')
interpolation: {'NearestNeighbor', 'Linear'}
Interpolation for the registration result (default is 'Linear')
regularization: {'Low', 'Medium', 'High'}
Regularization preset for the SyN deformation (default is 'Medium')
convergence: float
        Threshold for convergence, can make the algorithm very slow (default is 1e-6)
mask_zero: bool
Mask regions with zero value
(default is False)
ignore_affine: bool
Ignore the affine matrix information extracted from the image header
(default is False)
ignore_header: bool
Ignore the orientation information and affine matrix information
extracted from the image header (default is False)
save_data: bool
Save output data to file (default is False)
overwrite: bool
Overwrite existing results (default is False)
output_dir: str, optional
Path to desired output directory, will be created if it doesn't exist
file_name: str, optional
Desired base name for output files with file extension
(suffixes will be added)
Returns
----------
dict
Dictionary collecting outputs under the following keys
(suffix of output files in brackets)
* mapping (niimg): Coordinate mapping from source to target (_ants_map)
* inverse (niimg): Inverse coordinate mapping from target to source (_ants_invmap)
Notes
----------
Port of the CBSTools Java module by <NAME>. The main algorithm
is part of the ANTs software by <NAME> and colleagues [1]_. Parameters
have been set to values commonly found in neuroimaging scripts online, but
not necessarily optimal.
References
----------
.. [1] Avants et al (2008), Symmetric diffeomorphic
image registration with cross-correlation: evaluating automated labeling
of elderly and neurodegenerative brain, Med Image Anal. 12(1):26-41
"""
print('\nEmbedded ANTs Registration Surfaces')
# check if ants is installed to raise sensible error
try:
subprocess.run('antsRegistration', stdout=subprocess.DEVNULL)
except FileNotFoundError:
sys.exit("\nCould not find command 'antsRegistration'. Make sure ANTs is"
" installed and can be accessed from the command line.")
try:
subprocess.run('antsApplyTransforms', stdout=subprocess.DEVNULL)
except FileNotFoundError:
sys.exit("\nCould not find command 'antsApplyTransforms'. Make sure ANTs"
" is installed and can be accessed from the command line.")
# make sure that saving related parameters are correct
# output files needed for intermediate results
output_dir = _output_dir_4saving(output_dir, source_surface)
mapping_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='ants-map'))
inverse_mapping_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='ants-invmap'))
if save_data:
if overwrite is False \
and os.path.isfile(mapping_file) \
and os.path.isfile(inverse_mapping_file) :
print("skip computation (use existing results)")
output = {'mapping': mapping_file,
'inverse': inverse_mapping_file}
return output
# cropping and masking do not work well together?
if crop: mask_zero=False
# load and get dimensions and resolution from input images
source = load_volume(source_surface)
# flip the data around, threshold
source_ls = numpy.minimum(numpy.maximum(max_dist - source.get_data(),0.0),2.0*max_dist)
if crop:
# crop images for speed?
src_xmin, src_xmax = numpy.where(numpy.any(source_ls>0.1, axis=(1,2)))[0][[0, -1]]
src_ymin, src_ymax = numpy.where(numpy.any(source_ls>0.1, axis=(0,2)))[0][[0, -1]]
src_zmin, src_zmax = numpy.where(numpy.any(source_ls>0.1, axis=(0,1)))[0][[0, -1]]
source_ls = source_ls[src_xmin:src_xmax+1, src_ymin:src_ymax+1, src_zmin:src_zmax+1]
src_img = nibabel.Nifti1Image(source_ls, source.affine, source.header)
src_img.update_header()
src_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srcimg'))
save_volume(src_img_file, src_img)
source = load_volume(src_img_file)
src_affine = source.affine
src_header = source.header
nsx = source.header.get_data_shape()[X]
nsy = source.header.get_data_shape()[Y]
nsz = source.header.get_data_shape()[Z]
rsx = source.header.get_zooms()[X]
rsy = source.header.get_zooms()[Y]
rsz = source.header.get_zooms()[Z]
orig_src_aff = source.affine
orig_src_hdr = source.header
target = load_volume(target_surface)
# flip the data around
target_ls = numpy.minimum(numpy.maximum(max_dist - target.get_data(),0.0),2.0*max_dist)
if crop:
# crop images for speed?
trg_xmin, trg_xmax = numpy.where(numpy.any(target_ls>0.1, axis=(1,2)))[0][[0, -1]]
trg_ymin, trg_ymax = numpy.where(numpy.any(target_ls>0.1, axis=(0,2)))[0][[0, -1]]
trg_zmin, trg_zmax = numpy.where(numpy.any(target_ls>0.1, axis=(0,1)))[0][[0, -1]]
target_ls = target_ls[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1]
trg_img = nibabel.Nifti1Image(target_ls, target.affine, target.header)
trg_img.update_header()
trg_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=target_surface,
suffix='tmp_trgimg'))
save_volume(trg_img_file, trg_img)
target = load_volume(trg_img_file)
trg_affine = target.affine
trg_header = target.header
ntx = target.header.get_data_shape()[X]
nty = target.header.get_data_shape()[Y]
ntz = target.header.get_data_shape()[Z]
rtx = target.header.get_zooms()[X]
rty = target.header.get_zooms()[Y]
rtz = target.header.get_zooms()[Z]
orig_trg_aff = target.affine
orig_trg_hdr = target.header
# in case the affine transformations are not to be trusted: make them equal
if ignore_affine or ignore_header:
# create generic affine aligned with the orientation for the source
new_affine = numpy.zeros((4,4))
if ignore_header:
new_affine[0][0] = rsx
new_affine[1][1] = rsy
new_affine[2][2] = rsz
new_affine[0][3] = -rsx*nsx/2.0
new_affine[1][3] = -rsy*nsy/2.0
new_affine[2][3] = -rsz*nsz/2.0
else:
mx = numpy.argmax(numpy.abs([src_affine[0][0],src_affine[1][0],src_affine[2][0]]))
my = numpy.argmax(numpy.abs([src_affine[0][1],src_affine[1][1],src_affine[2][1]]))
mz = numpy.argmax(numpy.abs([src_affine[0][2],src_affine[1][2],src_affine[2][2]]))
new_affine[mx][0] = rsx*numpy.sign(src_affine[mx][0])
new_affine[my][1] = rsy*numpy.sign(src_affine[my][1])
new_affine[mz][2] = rsz*numpy.sign(src_affine[mz][2])
if (numpy.sign(src_affine[mx][0])<0):
new_affine[mx][3] = rsx*nsx/2.0
else:
new_affine[mx][3] = -rsx*nsx/2.0
if (numpy.sign(src_affine[my][1])<0):
new_affine[my][3] = rsy*nsy/2.0
else:
new_affine[my][3] = -rsy*nsy/2.0
if (numpy.sign(src_affine[mz][2])<0):
new_affine[mz][3] = rsz*nsz/2.0
else:
new_affine[mz][3] = -rsz*nsz/2.0
new_affine[3][3] = 1.0
src_img = nibabel.Nifti1Image(source.get_data(), new_affine, source.header)
src_img.update_header()
src_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srcimg'))
save_volume(src_img_file, src_img)
source = load_volume(src_img_file)
src_affine = source.affine
src_header = source.header
# create generic affine aligned with the orientation for the target
new_affine = numpy.zeros((4,4))
if ignore_header:
new_affine[0][0] = rtx
new_affine[1][1] = rty
new_affine[2][2] = rtz
new_affine[0][3] = -rtx*ntx/2.0
new_affine[1][3] = -rty*nty/2.0
new_affine[2][3] = -rtz*ntz/2.0
else:
#mx = numpy.argmax(numpy.abs(trg_affine[0][0:3]))
#my = numpy.argmax(numpy.abs(trg_affine[1][0:3]))
#mz = numpy.argmax(numpy.abs(trg_affine[2][0:3]))
#new_affine[0][mx] = rtx*numpy.sign(trg_affine[0][mx])
#new_affine[1][my] = rty*numpy.sign(trg_affine[1][my])
#new_affine[2][mz] = rtz*numpy.sign(trg_affine[2][mz])
#if (numpy.sign(trg_affine[0][mx])<0):
# new_affine[0][3] = rtx*ntx/2.0
#else:
# new_affine[0][3] = -rtx*ntx/2.0
#
#if (numpy.sign(trg_affine[1][my])<0):
# new_affine[1][3] = rty*nty/2.0
#else:
# new_affine[1][3] = -rty*nty/2.0
#
#if (numpy.sign(trg_affine[2][mz])<0):
# new_affine[2][3] = rtz*ntz/2.0
#else:
# new_affine[2][3] = -rtz*ntz/2.0
mx = numpy.argmax(numpy.abs([trg_affine[0][0],trg_affine[1][0],trg_affine[2][0]]))
my = numpy.argmax(numpy.abs([trg_affine[0][1],trg_affine[1][1],trg_affine[2][1]]))
mz = numpy.argmax(numpy.abs([trg_affine[0][2],trg_affine[1][2],trg_affine[2][2]]))
#print('mx: '+str(mx)+', my: '+str(my)+', mz: '+str(mz))
#print('rx: '+str(rtx)+', ry: '+str(rty)+', rz: '+str(rtz))
new_affine[mx][0] = rtx*numpy.sign(trg_affine[mx][0])
new_affine[my][1] = rty*numpy.sign(trg_affine[my][1])
new_affine[mz][2] = rtz*numpy.sign(trg_affine[mz][2])
if (numpy.sign(trg_affine[mx][0])<0):
new_affine[mx][3] = rtx*ntx/2.0
else:
new_affine[mx][3] = -rtx*ntx/2.0
if (numpy.sign(trg_affine[my][1])<0):
new_affine[my][3] = rty*nty/2.0
else:
new_affine[my][3] = -rty*nty/2.0
if (numpy.sign(trg_affine[mz][2])<0):
new_affine[mz][3] = rtz*ntz/2.0
else:
new_affine[mz][3] = -rtz*ntz/2.0
#if (numpy.sign(trg_affine[0][mx])<0): new_affine[mx][3] = rtx*ntx
#if (numpy.sign(trg_affine[1][my])<0): new_affine[my][3] = rty*nty
#if (numpy.sign(trg_affine[2][mz])<0): new_affine[mz][3] = rtz*ntz
#new_affine[0][3] = ntx/2.0
#new_affine[1][3] = nty/2.0
#new_affine[2][3] = ntz/2.0
new_affine[3][3] = 1.0
#print("\nbefore: "+str(trg_affine))
#print("\nafter: "+str(new_affine))
trg_img = nibabel.Nifti1Image(target.get_data(), new_affine, target.header)
trg_img.update_header()
trg_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_trgimg'))
save_volume(trg_img_file, trg_img)
target = load_volume(trg_img_file)
trg_affine = target.affine
trg_header = target.header
if mask_zero:
# create and save temporary masks
trg_mask_data = (target.get_data()!=0)*(target.get_data()!=2.0*max_dist)
trg_mask = nibabel.Nifti1Image(trg_mask_data, target.affine, target.header)
trg_mask_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_trgmask'))
save_volume(trg_mask_file, trg_mask)
src_mask_data = (source.get_data()!=0)*(source.get_data()!=2.0*max_dist)
src_mask = nibabel.Nifti1Image(src_mask_data, source.affine, source.header)
src_mask_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srcmask'))
save_volume(src_mask_file, src_mask)
# run the main ANTS software: here we directly build the command line call
reg = 'antsRegistration --collapse-output-transforms 1 --dimensionality 3' \
+' --initialize-transforms-per-stage 0 --interpolation Linear'
# add a prefix to avoid multiple names?
prefix = _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_syn')
prefix = os.path.basename(prefix)
prefix = prefix.split(".")[0]
#reg.inputs.output_transform_prefix = prefix
reg = reg+' --output '+prefix
if mask_zero:
reg = reg+' --masks ['+trg_mask_file+', '+src_mask_file+']'
srcfiles = []
trgfiles = []
print("registering "+source.get_filename()+"\n to "+target.get_filename())
srcfiles.append(source.get_filename())
trgfiles.append(target.get_filename())
weight = 1.0/len(srcfiles)
# set parameters for all the different types of transformations
if run_rigid is True:
reg = reg + ' --transform Rigid[0.1]'
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric MeanSquares['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 0, Random, 0.3 ]'
reg = reg + ' --convergence ['+str(rigid_iterations)+'x' \
+str(rigid_iterations)+'x'+str(rigid_iterations) \
+', '+str(convergence)+', 5 ]'
reg = reg + ' --smoothing-sigmas 4.0x2.0x0.0'
reg = reg + ' --shrink-factors 16x4x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
if run_affine is True:
reg = reg + ' --transform Affine[0.1]'
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric MeanSquares['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 0, Random, 0.3 ]'
reg = reg + ' --convergence ['+str(affine_iterations)+'x' \
+str(affine_iterations)+'x'+str(affine_iterations) \
+', '+str(convergence)+', 5 ]'
reg = reg + ' --smoothing-sigmas 4.0x2.0x0.0'
reg = reg + ' --shrink-factors 16x4x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
if run_syn is True:
if regularization == 'Low': syn_param = [0.1, 1.0, 0.0]
elif regularization == 'Medium': syn_param = [0.1, 3.0, 0.0]
elif regularization == 'High': syn_param = [0.2, 4.0, 3.0]
else: syn_param = [0.1, 3.0, 0.0]
reg = reg + ' --transform SyN'+str(syn_param)
if (cost_function=='Demons'):
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric Demons['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 4, Random, 0.3 ]'
else:
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric MeanSquares['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 0, Random, 0.3 ]'
reg = reg + ' --convergence ['+str(coarse_iterations)+'x' \
+str(coarse_iterations)+'x'+str(medium_iterations)+'x' \
+str(medium_iterations)+'x' \
+str(fine_iterations)+', '+str(convergence)+', 5 ]'
reg = reg + ' --smoothing-sigmas 9.0x6.0x3.0x1.0x0.0'
reg = reg + ' --shrink-factors 16x8x4x2x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
if run_rigid is False and run_affine is False and run_syn is False:
reg = reg + ' --transform Rigid[0.1]'
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric CC['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 5, Random, 0.3 ]'
reg = reg + ' --convergence [ 0x0x0, 1.0, 2 ]'
reg = reg + ' --smoothing-sigmas 3.0x2.0x1.0'
reg = reg + ' --shrink-factors 4x2x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
reg = reg + ' --write-composite-transform 0'
# run the ANTs command directly
print(reg)
try:
subprocess.check_output(reg, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = 'execution failed (error code '+str(e.returncode)+')\n Output: '+str(e.output)
raise subprocess.CalledProcessError(msg)
# output file names
results = sorted(glob(prefix+'*'))
forward = []
flag = []
for res in results:
if res.endswith('GenericAffine.mat'):
forward.append(res)
flag.append(False)
elif res.endswith('Warp.nii.gz') and not res.endswith('InverseWarp.nii.gz'):
forward.append(res)
flag.append(False)
#print('forward transforms: '+str(forward))
inverse = []
linear = []
for res in results[::-1]:
if res.endswith('GenericAffine.mat'):
inverse.append(res)
linear.append(True)
elif res.endswith('InverseWarp.nii.gz'):
inverse.append(res)
linear.append(False)
#print('inverse transforms: '+str(inverse))
#transform input (for checking)
# src_at = 'antsApplyTransforms --dimensionality 3 --input-image-type 3'
# src_at = src_at+' --input '+source.get_filename()
# src_at = src_at+' --reference-image '+target.get_filename()
# src_at = src_at+' --interpolation Linear'
# for idx,transform in enumerate(forward):
# if flag[idx]:
# src_at = src_at+' --transform ['+transform+', 1]'
# else:
# src_at = src_at+' --transform ['+transform+', 0]'
# src_at = src_at+' --output '+mapping_file
#
# print(src_at)
# try:
# subprocess.check_output(src_at, shell=True, stderr=subprocess.STDOUT)
# except subprocess.CalledProcessError as e:
# msg = 'execution failed (error code '+e.returncode+')\n Output: '+e.output
# raise subprocess.CalledProcessError(msg)
# Create forward coordinate mapping
src_coord = numpy.zeros((nsx,nsy,nsz,3))
src_coord[:,:,:,0] = numpy.expand_dims(numpy.expand_dims(numpy.array(range(nsx)),1),2) \
*numpy.ones((1,nsy,1))*numpy.ones((1,1,nsz))
src_coord[:,:,:,1] = numpy.ones((nsx,1,1))*numpy.expand_dims(numpy.expand_dims(numpy.array(range(nsy)),0),2) \
*numpy.ones((1,1,nsz))
src_coord[:,:,:,2] = numpy.ones((nsx,1,1))*numpy.ones((1,nsy,1)) \
*numpy.expand_dims(numpy.expand_dims(numpy.array(range(nsz)),0),1)
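    # For reference (illustration only): the expand_dims products above build the same
    # identity coordinate grid as
    #   numpy.stack(numpy.meshgrid(numpy.arange(nsx), numpy.arange(nsy),
    #                              numpy.arange(nsz), indexing='ij'), axis=-1)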
src_map = nibabel.Nifti1Image(src_coord, source.affine, source.header)
src_map_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srccoord'))
save_volume(src_map_file, src_map)
src_at = 'antsApplyTransforms --dimensionality 3 --input-image-type 3'
src_at = src_at+' --input '+src_map.get_filename()
src_at = src_at+' --reference-image '+target.get_filename()
src_at = src_at+' --interpolation Linear'
for idx,transform in enumerate(forward):
if flag[idx]:
src_at = src_at+' --transform ['+transform+', 1]'
else:
src_at = src_at+' --transform ['+transform+', 0]'
src_at = src_at+' --output '+mapping_file
print(src_at)
try:
subprocess.check_output(src_at, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = 'execution failed (error code '+e.returncode+')\n Output: '+e.output
raise subprocess.CalledProcessError(msg)
# uncrop if needed
if crop:
orig = load_volume(target_surface)
nx = orig.header.get_data_shape()[X]
ny = orig.header.get_data_shape()[Y]
nz = orig.header.get_data_shape()[Z]
coord = -numpy.ones((nx,ny,nz,3))
mapping = load_volume(mapping_file).get_data()
coord[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1, 0] = mapping[:,:,:,0] + src_xmin
coord[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1, 1] = mapping[:,:,:,1] + src_ymin
coord[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1, 2] = mapping[:,:,:,2] + src_zmin
coord_img = nibabel.Nifti1Image(coord, orig.affine, orig.header)
save_volume(mapping_file, coord_img)
# Create backward coordinate mapping
trg_coord = numpy.zeros((ntx,nty,ntz,3))
trg_coord[:,:,:,0] = numpy.expand_dims(numpy.expand_dims(numpy.array(range(ntx)),1),2) \
*numpy.ones((1,nty,1))*numpy.ones((1,1,ntz))
trg_coord[:,:,:,1] = numpy.ones((ntx,1,1))*numpy.expand_dims(numpy.expand_dims(numpy.array(range(nty)),0),2) \
*numpy.ones((1,1,ntz))
    trg_coord[:,:,:,2] = numpy.ones((ntx,1,1))*numpy.ones((1,nty,1)) \
                        *numpy.expand_dims(numpy.expand_dims(numpy.array(range(ntz)),0),1)
import os
import pickle
import matplotlib.collections as mcoll
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from preprocessing.classes.base.Generator import Generator
class ScanPathsGenerator(Generator):
def __init__(self):
super().__init__("scanpaths")
self.__use_temp_grad = self._params["temporal_gradient"]
self.__paths_to_sequences = self._paths.create_paths(self._params["path_to_src"])
self.__paths_to_scan_paths = self._paths.create_paths(self._params["path_to_dest"])
def __generate(self, path_to_src: str, path_to_destination: str):
sequences_files = os.listdir(path_to_src)
for file_name in tqdm(sequences_files, desc="Generating scan-paths at {}".format(path_to_destination)):
item = pickle.load(open(os.path.join(path_to_src, file_name), "rb")).values
# Filter out NaN
item = item[item[:, 0] != -1.0]
# Take "GazePointX (ADCSpx)" and "GazePointY (ADCSpx)"
x, y = item[:, 4], item[:, 5]
if self.__use_temp_grad:
_, _ = plt.subplots()
self.__color_lines(x, y)
plt.scatter(x, y, c="k", s=1, vmin=0, vmax=1050, alpha=0.0)
else:
plt.scatter(x, y, vmin=0, vmax=1050)
plt.plot(x, y)
plt.axis('off')
plt.savefig(os.path.join(path_to_destination, file_name.replace("pkl", "png")), bbox_inches='tight')
plt.clf()
@staticmethod
def __color_lines(x: np.ndarray, y: np.ndarray):
        path = mpath.Path(np.column_stack([x, y]))
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 20 18:43:11 2020
@author: Christian
"""
import numpy as np
from .solutionClass import Individual
# =============================================================================
# Fitness functions
# =============================================================================
def basicFitness(individual, env):
"""
The trivial case, where fitness is just the result of passing through
the environment.
"""
return individual.result
# =============================================================================
# Rank populations
# =============================================================================
"""
Thes functions are used to rank the score of individuals within a population.
"""
def rankFitness(scores, Npop):
"""
Assign a rank to each item in the population, with low scores being
assigned the first rank.
[0, 1, 100, 20, 0.1, 5] - > [0, 2, 5, 4, 1, 3]
Parameters
----------
scores : list
A list of input scores input scores.
Npop : int
THe number of individuals in the population.
Returns
-------
float
The ranks in order of lowest output to highest output.
"""
populationRanks = np.zeros(Npop)
# sort the array, then create an array of ranks
sortedIndexes = scores.argsort()
populationRanks[sortedIndexes] = np.arange(Npop)
return populationRanks
def rankFitnessInversed(scores, Npop):
"""
Assign a rank to each item in the population, with high scores being assigned
the highest rank.
    [0, 1, 100, 20, 0.1, 5] -> [6, 4, 1, 2, 5, 3]
Parameters
----------
scores : list
A list of input scores input scores.
Npop : int
THe number of individuals in the population.
Returns
-------
float
The ranks in order of highest output to lowest output.
"""
ranks = rankFitness(scores, Npop)
return Npop - ranks
# =============================================================================
#
# =============================================================================
"""
Ranked Fitness probabilities
Defines the probability that a parent is chosen.
probabilities are represented as a cumulative distrtibution
Defines the likilhood a gene is chosen for reproduction. The input scores
can have any positive values.
Each point in the output array has width equal to the liklihood of it being
chosen.
"""
def _parseStrategyInputs(strategy):
"""
Parses the inputs. If a integer is passed in, we sleect the appropriate
strategy. Otherwise we pass in the custom strategy directly
"""
crossoverStrategies = {0:rankedRouletteFitnessProbs,
1:rouletteFitnessProbs,
}
if isinstance(strategy, int):
if strategy not in crossoverStrategies.keys():
raise Exception('Invalid strategy Index Provided')
crossoverStrategy = crossoverStrategies[strategy]
elif callable(strategy):
crossoverStrategy = strategy
return crossoverStrategy
def getProbStrategy(strategy = 0):
"""
A functions that returns the desired probability defintion function. It is
possible to choose from a set of predefined functions, or use a custom
function.
When chosing from predefined functions, an integer is passed.
Custom probability functions will returna cumulative distribution for a
set of input individuals.
Parameters
----------
crossoverStrategy : int, optional, or function
The cross over strategy to use. The current strategies supported are
0: rankedRouletteFitnessProbs
1: rouletteFitnessProbs
The default is rankedRouletteFitnessProbs.
Returns
-------
probabilityFunction: function
The function used to define the cumulative distribution for a set
of scores in the population.
"""
return _parseStrategyInputs(strategy)
def rankedRouletteFitnessProbs(scores):
"""
Assigns probabilities of being chosen to a set of scores, based on the rank
of each score in the population.
The probability of each selection is assigned with the goal of minimizing
the input score.
Parameters
----------
scores : list
The input fitness/score of each individual after it's passed through
the environment and processed by the fitness function.
Returns
-------
probs : list
The output cumulative distribution is for the scores.
"""
Npop = len(scores)
populationRanks = rankFitness(scores, Npop)
wheelAreas = Npop - populationRanks
cumulativeFitness = np.cumsum(wheelAreas)
probs = cumulativeFitness / cumulativeFitness[-1]
return probs
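    # Worked example (illustrative): scores [3.0, 1.0, 2.0] rank to [2, 0, 1], so the
    # wheel areas are Npop - ranks = [1, 3, 2], the cumulative sum is [1, 4, 6] and
    # probs = [1/6, 4/6, 1]; the best (lowest) score owns half of the wheel.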
def rouletteFitnessProbs(scores):
"""
Assigns probabilities of being chosen to a set of scores, based on the
score of each individual in the population.
The probability of each selection is assigned with the goal of minimizing
the input score.
Parameters
----------
scores : list
The input fitness/score of each individual after it's passed through
the environment and processed by the fitness function.
Returns
-------
probs : list
The output cumulative distribution is for the scores.
"""
    # Weight each individual directly by its score (not its rank): lower scores are
    # better, so the wheel area is taken as (max score - score); a small epsilon keeps
    # the distribution valid when all scores are equal. Assumes scores are to be minimized.
    scores = np.asarray(scores, dtype=float)
    wheelAreas = (np.max(scores) - scores) + 1e-12
    cumulativeFitness = np.cumsum(wheelAreas)
    probs = cumulativeFitness / cumulativeFitness[-1]
    return probs
def pick_Individual(population, probs):
"""
Select a member of the population at random depending on a input
cumulative probability distribution.
Parameters
----------
population : list
The input population of individuals.
probs : array
The cumulative probability distribution for the population.
Returns
-------
selection : individual
The selected individual from the population.
"""
#
    rand = np.random.random()
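    # A minimal sketch (assumption) of the selection step: `probs` is the cumulative
    # distribution built by the functions above, so the first individual whose cumulative
    # probability covers the random draw is returned.
    index = int(np.searchsorted(probs, rand))
    selection = population[index]
    return selection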
import time, os, csv, random
import cma
import numpy as np
import pandas as pd
from scipy.linalg import cholesky
from numpy.linalg import LinAlgError
from numpy.random import standard_normal
from sklearn.decomposition import PCA
from copy import deepcopy
import matplotlib.pyplot as plt
from nevergrad.functions import corefuncs
import tensorflow as tf
def output_data(filename, data):
file = open(filename, 'w', newline='', encoding='utf-8')
csv_writer = csv.writer(file, delimiter='\n', quotechar=' ', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(data)
file.close()
# Vanilla ES framework to estimate gradients
def es_compute_grads(x, loss_fn, sigma=0.01, pop_size=10):
grad = 0
for i in range(pop_size):
noise = sigma / np.sqrt(len(x)) * np.random.randn(1, len(x))
noise = noise.reshape(x.shape)
grad += noise * (loss_fn(x + noise) - loss_fn(x - noise))
g_hat = grad / (2 * pop_size * sigma ** 2)
return g_hat
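# A small sanity check of the estimator above (illustrative helper, not part of the
# original module): for the quadratic loss f(x) = ||x||^2 the true gradient is 2x, so
# the ES estimate should roughly align with it for small sigma and enough samples.
def _demo_es_compute_grads():
    x0 = np.ones(5)
    loss_fn = lambda v: np.sum(v ** 2)
    g_hat = es_compute_grads(x0, loss_fn, sigma=0.01, pop_size=200)
    cos = np.dot(g_hat, 2 * x0) / (np.linalg.norm(g_hat) * np.linalg.norm(2 * x0))
    print("cosine similarity with the analytic gradient:", cos)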
# Using Vanilla ES to estimate the gradient.
def es(x_init, loss_fn, lr=0.2, sigma=0.01, pop_size=10, max_samples=int(1e5)):
x = deepcopy(x_init)
xs, ys, ts, errors = [0], [loss_fn(x)], [0.], [0.]
total_sample, current_iter = 0, 0
while total_sample < max_samples:
time_st = time.time()
g_hat = es_compute_grads(x, loss_fn, sigma=sigma, pop_size=pop_size)
        errors.append(np.dot(2*x, g_hat)/(np.linalg.norm(2*x) * np.linalg.norm(g_hat)))
import numpy as np
from collections import defaultdict
from collections import OrderedDict
import heapq
def measure_k(probs, true_ys, k_list=[1, 3, 5]):
max_k = np.max(k_list)
num_samples = np.size(true_ys, 0)
precision_k = defaultdict(float)
dgg_k = defaultdict(float)
ndgg_k = defaultdict(float)
for i in range(num_samples):
prob = probs[i, :]
true_y = true_ys[i, :]
prob = list(zip(prob, range(len(prob))))
max_k_largest_index = [x[1] for x in heapq.nlargest(max_k, prob, key=lambda x: x[0])]
for k in k_list:
precision_k[k] += np.sum(true_y[max_k_largest_index[0:k]])/k
dgg_k[k] += np.sum(true_y[max_k_largest_index[0:k]] / np.log2(2+np.arange(k)))
for k in k_list:
precision_k[k] /= num_samples
dgg_k[k] /= num_samples
ndgg_k[k] = dgg_k[k] / np.sum(1/np.log2(2+np.arange(k)))
return precision_k, dgg_k, ndgg_k
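# Tiny illustrative check of measure_k (helper added for clarity, not part of the
# original module): both samples' top-1 prediction hits a true label, so precision@1
# should come out as 1.0.
def _demo_measure_k():
    probs = np.array([[0.9, 0.1, 0.8], [0.2, 0.7, 0.4]])
    true_ys = np.array([[1, 0, 1], [0, 1, 0]])
    precision_k, dgg_k, ndgg_k = measure_k(probs, true_ys, k_list=[1, 2])
    print(precision_k[1]) # expected: 1.0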
def measure_b(pred_b, y):
epsilon = 1e-9
#micro
tp = np.sum(np.logical_and(pred_b, y))
fp = np.sum(np.logical_and(pred_b, np.logical_not(y)))
fn = np.sum(np.logical_and(np.logical_not(pred_b), y))
micro_p = tp/(tp+fp+epsilon)
micro_r = tp/(tp+fn+epsilon)
micor_f1 = 2*micro_p*micro_r/(micro_p+micro_r)
#marco
tp = np.sum(np.logical_and(pred_b, y), 0)
fp = np.sum(np.logical_and(pred_b, np.logical_not(y)), 0)
fn = np.sum(np.logical_and(np.logical_not(pred_b), y), 0)
marco_p = np.mean(tp/(tp+fp+epsilon))
marco_r = np.mean(tp/(tp+fn+epsilon))
marco_f1 = 2*marco_p*marco_r/(marco_p+marco_r)
#Example based measures
hamming_loss = np.mean(np.logical_xor(pred_b, y))
accuracy = np.mean(np.sum(np.logical_and(pred_b, y), 1)/np.sum(np.logical_or(pred_b, y), 1))
precision = np.mean(np.sum(np.logical_and(pred_b, y), 1)/np.sum(pred_b, 1))
    recall = np.mean(np.sum(np.logical_and(pred_b, y), 1)/np.sum(y, 1))
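    # A minimal sketch (assumption) of how the metric set could be wrapped up: an
    # example-based F1 mirroring the micro/macro pattern, with all computed values
    # returned together; the exact return structure of the original is not shown here.
    example_f1 = 2 * precision * recall / (precision + recall + epsilon)
    return {'micro_f1': micor_f1, 'macro_f1': marco_f1, 'hamming_loss': hamming_loss,
            'accuracy': accuracy, 'precision': precision, 'recall': recall,
            'example_f1': example_f1}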
import pickle
from collections import OrderedDict
import numpy as np
import numpy.linalg as npl
from sgr_analysis.analysis_utils import read_snapshot_file
from sgr_analysis.parse_outputs_snapshot_method_dependence import methods
from sgr_analysis.parse_outputs_snapshot_method_dependence import basis_sets
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
colors = OrderedDict([
('blyp', 'blue'),
('tpss', 'cyan'),
('b3lyp', 'red'),
('wb97x-d', 'orange'),
('hf', 'black'),
('ri-mp2', 'grey'),
])
linestyles = OrderedDict([
('6-31gdp', '-'),
('cc-pvtz', '--'),
])
def get_single_snapshot_results(snapnum, snapnums_dict, results_dict):
"""For a given snapshot number, get the single 'result' (frequency,
intensity, dipole, etc.) corresponding to it, maintaining the original
dictionary structure.
"""
single_dict = dict()
assert snapnums_dict.keys() == results_dict.keys()
for method in methods:
assert snapnums_dict[method].keys() == results_dict[method].keys()
single_dict[method] = dict()
for basis_set in basis_sets:
assert snapnums_dict[method][basis_set].keys() == results_dict[method][basis_set].keys()
single_dict[method][basis_set] = dict()
for n_qm in snapnums_dict[method][basis_set]:
assert snapnums_dict[method][basis_set][n_qm].keys() == results_dict[method][basis_set][n_qm].keys()
single_dict[method][basis_set][n_qm] = dict()
for n_mm in snapnums_dict[method][basis_set][n_qm]:
if len(snapnums_dict[method][basis_set][n_qm][n_mm]) > 0:
idx = snapnums_dict[method][basis_set][n_qm][n_mm].index(snapnum)
single_result = results_dict[method][basis_set][n_qm][n_mm][idx]
single_dict[method][basis_set][n_qm][n_mm] = single_result
return single_dict
def plot_single_snapshot_dipoles(snapnum, snapnums_d, dipoles, inp_fig=None, inp_ax=None):
"""Plot the dipole moment of a single snapshot as a function of its
method and basis set.
"""
dipoles_snap = get_single_snapshot_results(snapnum, snapnums_d, dipoles)
fig, ax = plt.subplots()
if inp_fig:
fig = inp_fig
if inp_ax:
ax = inp_ax
for basis_set in basis_sets:
plot_list = [npl.norm(dipoles_snap[method][basis_set][0][0])
for method in methods]
ax.plot(ticks_methods,
plot_list,
label=basis_sets[basis_set],
marker='o')
if not inp_ax:
ax.tick_params(direction='out', top='off', right='off')
ax.set_xticklabels(xticklabels_methods)
ax.set_xlabel('method', fontsize='large')
ax.set_ylabel("total dipole moment (Debye)", fontsize='large')
ax.set_title("snapshot {}".format(snapnum), fontsize='large')
ax.legend(loc='best', fancybox=True, framealpha=0.50)
if not inp_fig:
filename = 'dipole_snap{}.pdf'.format(snapnum)
print('Saving {}'.format(filename))
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
return
def plot_single_snapshot_frequencies(snapnum, snapnums_f, frequencies, inp_fig=None, inp_ax=None):
"""Plot the CO2 v3 frequency of a single snapshot as a function of its
method and basis set.
"""
frequencies_snap = get_single_snapshot_results(snapnum, snapnums_f, frequencies)
fig, ax = plt.subplots()
if inp_fig:
fig = inp_fig
if inp_ax:
ax = inp_ax
for basis_set in basis_sets:
plot_list = [frequencies_snap[method][basis_set][0][0]
for method in methods]
ax.plot(ticks_methods,
plot_list,
label=basis_sets[basis_set],
marker='o')
if not inp_ax:
ax.tick_params(direction='out', top='off', right='off')
ax.set_xticklabels(xticklabels_methods)
ax.set_xlabel('method', fontsize='large')
ax.set_ylabel(r'$\nu_{3}$ frequency (cm$^{-1}$)', fontsize='large')
ax.set_title("snapshot {}".format(snapnum), fontsize='large')
ax.legend(loc='best', fancybox=True, framealpha=0.50)
if not inp_fig:
filename = 'frequency_snap{}.pdf'.format(snapnum)
print('Saving {}'.format(filename))
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
return
if __name__ == '__main__':
# Read in the pickle files that contain all the raw data.
with open('frequencies.pypickle', 'rb') as picklefile:
frequencies = pickle.load(picklefile)
with open('dipoles.pypickle', 'rb') as picklefile:
dipoles = pickle.load(picklefile)
with open('snapnums_frequencies.pypickle', 'rb') as picklefile:
snapnums_f = pickle.load(picklefile)
with open('snapnums_dipoles.pypickle', 'rb') as picklefile:
snapnums_d = pickle.load(picklefile)
ticks_methods = range(len(methods))
xticklabels_methods = list(methods[method] for method in methods)
    means_frequencies = [np.mean(frequencies[method]['6-31gdp'][0][0])
                         for method in methods]
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import warnings
from matplotlib import __version__
from matplotlib.cbook import maxdict
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, register_backend
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.texmanager import TexManager
import numpy as np
import gr
linetype = {'solid': 1, 'dashed': 2, 'dashdot': 4, 'dotted': 3}
os.environ['GKS_DOUBLE_BUF'] = '1'
class RendererGR(RendererBase):
"""
Handles drawing/rendering operations using GR
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
self.dpi = dpi
if __version__[0] >= '2':
self.nominal_fontsize = 0.001625
default_dpi = 100
else:
self.nominal_fontsize = 0.0013
default_dpi = 80
self.width = float(width) * dpi / default_dpi
self.height = float(height) * dpi / default_dpi
self.mathtext_parser = MathTextParser('agg')
self.texmanager = TexManager()
def configure(self):
aspect_ratio = self.width / self.height
if aspect_ratio > 1:
rect = np.array([0, 1, 0, 1.0 / aspect_ratio])
self.size = self.width
else:
rect = np.array([0, aspect_ratio, 0, 1])
self.size = self.height
mwidth, mheight, width, height = gr.inqdspsize()
if width / (mwidth / 0.0256) < 200:
mwidth *= self.width / width
gr.setwsviewport(*rect * mwidth)
else:
gr.setwsviewport(*rect * 0.192)
gr.setwswindow(*rect)
gr.setviewport(*rect)
gr.setwindow(0, self.width, 0, self.height)
def draw_path(self, gc, path, transform, rgbFace=None):
path = transform.transform_path(path)
points = path.vertices
codes = path.codes
bbox = gc.get_clip_rectangle()
if bbox is not None and not np.any(np.isnan(bbox.bounds)):
x, y, w, h = bbox.bounds
clrt = np.array([x, x + w, y, y + h])
else:
clrt = np.array([0, self.width, 0, self.height])
gr.setviewport(*clrt / self.size)
gr.setwindow(*clrt)
if rgbFace is not None and len(points) > 2:
color = gr.inqcolorfromrgb(rgbFace[0], rgbFace[1], rgbFace[2])
gr.settransparency(rgbFace[3])
gr.setcolorrep(color, rgbFace[0], rgbFace[1], rgbFace[2])
gr.setfillintstyle(gr.INTSTYLE_SOLID)
gr.setfillcolorind(color)
gr.drawpath(points, codes, fill=True)
lw = gc.get_linewidth()
if lw != 0:
rgba = gc.get_rgb()[:4]
color = gr.inqcolorfromrgb(rgba[0], rgba[1], rgba[2])
gr.settransparency(rgba[3])
gr.setcolorrep(color, rgba[0], rgba[1], rgba[2])
if isinstance(gc._linestyle, str):
gr.setlinetype(linetype[gc._linestyle])
gr.setlinewidth(lw)
gr.setlinecolorind(color)
gr.drawpath(points, codes, fill=False)
def draw_image(self, gc, x, y, im):
if hasattr(im, 'as_rgba_str'):
h, w, s = im.as_rgba_str()
img = np.fromstring(s, np.uint32)
img.shape = (h, w)
elif len(im.shape) == 3 and im.shape[2] == 4 and im.dtype == np.uint8:
img = im.view(np.uint32)
img.shape = im.shape[:2]
h, w = img.shape
else:
type_info = repr(type(im))
if hasattr(im, 'shape'):
type_info += ' shape=' + repr(im.shape)
if hasattr(im, 'dtype'):
type_info += ' dtype=' + repr(im.dtype)
warnings.warn('Unsupported image type ({}). Please report this at https://github.com/sciapp/python-gr/issues'.format(type_info))
return
gr.drawimage(x, x + w, y + h, y, w, h, img)
def draw_mathtext(self, x, y, angle, Z):
h, w = Z.shape
img = np.zeros((h, w), np.uint32)
for i in range(h):
for j in range(w):
img[i, j] = (255 - Z[i, j]) << 24
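        # Equivalent vectorized form of the loop above (illustration only):
        #   img = (255 - Z.astype(np.uint32)) << 24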
a = int(angle)
if a == 90:
gr.drawimage(x - h, x, y, y + w, h, w,
np.resize(np.rot90(img, 1), (h, w)))
elif a == 180:
gr.drawimage(x - w, x, y - h, y, w, h, np.rot90(img, 2))
elif a == 270:
gr.drawimage(x, x + h, y - w, y, h, w,
np.resize(np.rot90(img, 3), (h, w)))
else:
gr.drawimage(x, x + w, y, y + h, w, h, img)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
size = prop.get_size_in_points()
key = s, size, self.dpi, angle, self.texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = self.texmanager.get_grey(s, size, self.dpi)
Z = np.array(255.0 - Z * 255.0, np.uint8)
self.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
self.draw_mathtext(x, y, angle, 255 - np.asarray(image))
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
x, y = gr.wctondc(x, y)
fontsize = prop.get_size_in_points()
rgba = gc.get_rgb()[:4]
color = gr.inqcolorfromrgb(rgba[0], rgba[1], rgba[2])
gr.settransparency(rgba[3])
gr.setcolorrep(color, rgba[0], rgba[1], rgba[2])
gr.setcharheight(fontsize * self.nominal_fontsize)
gr.settextcolorind(color)
if angle != 0:
                gr.setcharup(-np.sin(angle * np.pi / 180), np.cos(angle * np.pi / 180))
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as patch
from matplotlib import ticker
import numpy as np
from astropy.io import fits
from copy import copy
from argparse import ArgumentParser
parser = ArgumentParser(prog='Plot continuum', description='Free free emission from HII regions')
parser.add_argument('-i', '--id', help='id of fits file to be plotted')
args = parser.parse_args()
if args.id is None: id = ''
else: id = args.id
tag = 'img_HII_env+disc_'+id
#------------------
#LOADING DATA
#------------------
if id: image_cont_349 = 'img_HII_env+disc-'+id+'.fits'
else: image_cont_349 = 'img_HII_env+disc.fits'
data_349, header_349 = fits.getdata(image_cont_349, header=True)
if id == 'CONV_noise': pass
else: data_349 = data_349[0]
#-------------------------
#RESOLUTION AND IMAGE SIZE
#-------------------------
distance = 4000 # parsecs
x_lim = 4000 #AUs
extent = [-x_lim, x_lim, -x_lim, x_lim]
#-----------
#CONT. DATA
#-----------
Npix_349 = header_349['NAXIS1']
resolution_349 = abs(header_349['CDELT1']) * 3600 # in arcsecs
#x_lim = (round (Npix / 2.) - x_init) * resolution * distance
x_inits = int(round (round (Npix_349 / 2.) - (x_lim / (resolution_349 * distance)) ))
x_ends = Npix_349 - x_inits
#-----
#BEAM
#-----
header = header_349
i = 0
ellipse = 0
flag_beam = False
try:
if header['BMAJ']: pass
a, b = np.array([header['BMAJ'] , header['BMIN']]) * 3600 * distance
if id == 'CONV': f_c, e_c = 'gray', 'white'
elif id == 'CONV_noise': f_c, e_c = 'black', 'white'
ellipse = patch.Ellipse(xy = (-3500,-3500), angle = 90 + header['BPA'],
width = a, height = b, linewidth = 1,
fill = True, facecolor = f_c, edgecolor = e_c)
flag_beam = True
except KeyError: pass
#-----
#-----
#-----------------------------
#COLORS IN LIMITS OF COLORBARS
#-----------------------------
#Use copy so that we don't mutate the global colormap instance
colormm = plt.cm.hot
palette_hot = copy(colormm)
palette_hot.set_over('red', 0.8)#colormm(255), 0.8)
if id == 'CONV_noise': palette_hot.set_under('gray', 0.9)#colormm(0), 0.8)
else: palette_hot.set_under('black', 1.0)#colormm(0), 0.8)
colormm = plt.cm.cubehelix_r
palette_helix = copy(colormm)
palette_helix.set_over('black', 1)#colormm(255), 0.8)
palette_helix.set_under('white', 1)#colormm(0), 0.8)
#-----------------------------
#-----------------------------
#---------
#PLOTTING
#---------
images = data_349
fig, ax = plt.subplots(nrows=1, ncols=1) #, figsize=(8, 4.5))
if id == 'CONV_noise': ax.set_facecolor('darkgray')
else: ax.set_facecolor('black')
im = np.ones((3,3))
import numpy as np
import pdb
import pywt
## This file is imported from the modwt wavelet transform provided at:
## https://github.com/pistonly/modwtpy
## It appears that PyWavelets does not provide the maximal overlap discrete wavelet transform (MODWT).
def upArrow_op(li, j):
if j == 0:
return [1]
N = len(li)
li_n = np.zeros(2 ** (j - 1) * (N - 1) + 1)
for i in range(N):
li_n[2 ** (j - 1) * i] = li[i]
return li_n
def period_list(li, N):
n = len(li)
# append [0 0 ...]
n_app = N - np.mod(n, N)
li = list(li)
li = li + [0] * n_app
if len(li) < 2 * N:
return np.array(li)
else:
li = np.array(li)
li = np.reshape(li, [-1, N])
li = np.sum(li, axis=0)
return li
def circular_convolve_mra(h_j_o, w_j):
''' calculate the mra D_j'''
N = len(w_j)
l = np.arange(N)
D_j = np.zeros(N)
for t in range(N):
index = np.mod(t + l, N)
w_j_p = np.array([w_j[ind] for ind in index])
D_j[t] = (np.array(h_j_o) * w_j_p).sum()
return D_j
def circular_convolve_d(h_t, v_j_1, j):
'''
jth level decomposition
h_t: \tilde{h} = h / sqrt(2)
v_j_1: v_{j-1}, the (j-1)th scale coefficients
return: w_j (or v_j)
'''
N = len(v_j_1)
L = len(h_t)
w_j = np.zeros(N)
l = np.arange(L)
for t in range(N):
index = np.mod(t - 2 ** (j - 1) * l, N)
v_p = np.array([v_j_1[ind] for ind in index])
w_j[t] = (np.array(h_t) * v_p).sum()
return w_j
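# The helpers above are building blocks; below is a minimal sketch (not part of the
# original file) of how circular_convolve_d is usually composed into a full MODWT
# decomposition. The filter choice ('db2') and level count are illustrative
# assumptions; the filters are rescaled by 1/sqrt(2) as described in the docstring above.
def modwt_sketch(x, wavelet_name='db2', level=3):
    wavelet = pywt.Wavelet(wavelet_name)
    h_t = np.array(wavelet.dec_hi) / np.sqrt(2)  # rescaled wavelet (high-pass) filter
    g_t = np.array(wavelet.dec_lo) / np.sqrt(2)  # rescaled scaling (low-pass) filter
    coeffs = []
    v_j = np.asarray(x, dtype=float)
    for j in range(level):
        w_j = circular_convolve_d(h_t, v_j, j + 1)  # detail coefficients at level j+1
        v_j = circular_convolve_d(g_t, v_j, j + 1)  # smooth coefficients for the next level
        coeffs.append(w_j)
    coeffs.append(v_j)  # final smooth coefficients
    return np.vstack(coeffs)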
def circular_convolve_s(h_t, g_t, w_j, v_j, j):
'''
(j-1)th level synthesis from w_j, w_j
see function circular_convolve_d
'''
N = len(v_j)
L = len(h_t)
v_j_1 = np.zeros(N)
    l = np.arange(L)
import itertools
import warnings
from inspect import signature
from timeit import default_timer
from sklearn.preprocessing import normalize
import dask
import numpy as np
try:
import shap
except:
    msg = "SHAP not found, therefore using SHAP values for feature importance is not available."
warnings.warn(msg)
shap = None
from dask import delayed
from networkx import NetworkXUnfeasible, find_cycle, topological_sort
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.impute import SimpleImputer
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..algo import (
evaluation,
imputation,
inference,
inference_v3,
new_inference,
new_prediction,
selection,
vector_prediction,
)
from ..algo.induction import base_induction_algorithm, expand_induction_algorithm
from ..composition import CompositeModel, NewCompositeModel, o, x
from ..graph import build_diagram, compose_all, get_targ, model_to_graph
from ..utils import (
DESC_ENCODING,
MISS_ENCODING,
TARG_ENCODING,
DecoratedDecisionTreeClassifier,
DecoratedDecisionTreeRegressor,
DecoratedRandomForestClassifier,
DecoratedRandomForestRegressor,
code_to_query,
get_i_o,
query_to_code,
)
from ..visuals import save_diagram, show_diagram
try:
from xgboost import XGBClassifier as XGBC
from xgboost import XGBRegressor as XGBR
except:
XGBC, XGBR = None, None
try:
from lightgbm import LGBMClassifier as LGBMC
from lightgbm import LGBMRegressor as LGBMR
except:
LGBMC, LGBMR = None, None
try:
from catboost import CatBoostClassifier as CBC
from catboost import CatBoostRegressor as CBR
except:
CBC, CBR = None, None
try:
from wekalearn import RandomForestClassifier as WLC
from wekalearn import RandomForestRegressor as WLR
except:
WLC, WLR = None, None
class Mercs(object):
delimiter = "_"
selection_algorithms = dict(
default=selection.base_selection_algorithm,
base=selection.base_selection_algorithm,
random=selection.random_selection_algorithm,
)
induction_algorithms = dict(
base=base_induction_algorithm,
default=base_induction_algorithm,
expand=expand_induction_algorithm,
)
classifier_algorithms = dict(
DT=DecisionTreeClassifier,
DDT=DecoratedDecisionTreeClassifier,
RF=RandomForestClassifier,
DRF=DecoratedRandomForestClassifier,
XGB=XGBC,
xgb=XGBC,
weka=WLC,
LGBM=LGBMC,
lgbm=LGBMC,
CB=CBC,
extra=ExtraTreesClassifier,
)
regressor_algorithms = dict(
DT=DecisionTreeRegressor,
DDT=DecoratedDecisionTreeRegressor,
RF=RandomForestRegressor,
DRF=DecoratedDecisionTreeRegressor,
XGB=XGBR,
xgb=XGBR,
weka=WLR,
LGBM=LGBMR,
lgbm=LGBMR,
CB=CBR,
extra=ExtraTreesRegressor,
)
prediction_algorithms = dict(
mi=vector_prediction.mi,
mrai=vector_prediction.mrai,
it=vector_prediction.it,
rw=vector_prediction.rw,
)
inference_algorithms = dict(
base=inference.base_inference_algorithm,
dask=inference_v3.inference_algorithm,
own=inference_v3.inference_algorithm,
)
imputer_algorithms = dict(
nan=imputation.nan_imputation,
NAN=imputation.nan_imputation,
NaN=imputation.nan_imputation,
null=imputation.nan_imputation,
NULL=imputation.nan_imputation,
skl=imputation.skl_imputation,
base=imputation.skl_imputation,
default=imputation.skl_imputation,
)
evaluation_algorithms = dict(
base=evaluation.base_evaluation,
default=evaluation.base_evaluation,
dummy=evaluation.dummy_evaluation,
)
    # Used in _parse_kwargs to identify which component a keyword argument belongs to.
    # If this identification goes wrong, settings are sent somewhere you do not want
    # them to be. This part is tricky and, moreover, hardcoded; in other words, this is
    # risky terrain and should probably be done differently in the future.
configuration_prefixes = dict(
imputation={"imputation", "imp"},
induction={"induction", "ind"},
selection={"selection", "sel"},
prediction={"prediction", "pred", "prd"},
inference={"inference", "infr", "inf"},
classification={"classification", "classifier", "clf"},
regression={"regression", "regressor", "rgr"},
metadata={"metadata", "meta", "mtd"},
evaluation={"evaluation", "evl"},
)
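    # For example (an illustrative note, not exercised anywhere in this file):
    # passing `sel_nb_targets=2` (or `selection_nb_targets=2`) to _update_config is
    # routed into the "selection" configuration as `nb_targets=2`, because "sel" and
    # "selection" are registered prefixes for that kind.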
def __init__(
self,
selection_algorithm="base",
induction_algorithm="base",
classifier_algorithm="DT",
regressor_algorithm="DT",
prediction_algorithm="mi",
inference_algorithm="own",
imputer_algorithm="default",
evaluation_algorithm="default",
random_state=42,
**kwargs
):
self.params = dict(
selection_algorithm=selection_algorithm,
induction_algorithm=induction_algorithm,
classifier_algorithm=classifier_algorithm,
regressor_algorithm=regressor_algorithm,
prediction_algorithm=prediction_algorithm,
inference_algorithm=inference_algorithm,
imputer_algorithm=imputer_algorithm,
evaluation_algorithm=evaluation_algorithm,
random_state=random_state,
)
self.params = {**self.params, **kwargs}
self.random_state = random_state
self.selection_algorithm = self.selection_algorithms[selection_algorithm]
# N.b.: First try to look up the key. If the key is not found, we assume the algorithm itself was passed.
self.classifier_algorithm = self.classifier_algorithms.get(
classifier_algorithm, classifier_algorithm
)
self.regressor_algorithm = self.regressor_algorithms.get(
regressor_algorithm, regressor_algorithm
)
self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm]
self.inference_algorithm = self.inference_algorithms[inference_algorithm]
self.induction_algorithm = self.induction_algorithms[
induction_algorithm
] # For now, we only have one.
self.imputer_algorithm = self.imputer_algorithms[imputer_algorithm]
self.evaluation_algorithm = self.evaluation_algorithms[evaluation_algorithm]
# Data-structures
self.m_codes = np.array([])
self.m_list = []
self.c_list = []
self.g_list = []
self.i_list = []
self.m_fimps = np.array([])
self.m_score = np.array([])
self.FI = np.array([])
self.targ_ids = np.array([])
# Query-related things
self.q_code = None
self.q_desc_ids = None
self.q_targ_ids = None
self.q_diagram = None
self.q_compose = None
self.q_methods = []
# Configurations
self.imp_cfg = self._default_config(self.imputer_algorithm)
self.ind_cfg = self._default_config(self.induction_algorithm)
self.sel_cfg = self._default_config(self.selection_algorithm)
self.clf_cfg = self._default_config(self.classifier_algorithm)
self.rgr_cfg = self._default_config(self.regressor_algorithm)
self.prd_cfg = self._default_config(self.prediction_algorithm)
self.inf_cfg = self._default_config(self.inference_algorithm)
self.evl_cfg = self._default_config(self.evaluation_algorithm)
self.configuration = dict(
imputation=self.imp_cfg,
induction=self.ind_cfg,
selection=self.sel_cfg,
classification=self.clf_cfg,
regression=self.rgr_cfg,
prediction=self.prd_cfg,
inference=self.inf_cfg,
) # Collect all configs in one
self._update_config(random_state=random_state, **kwargs)
self.metadata = dict()
self.model_data = dict()
self._extra_checks_on_config()
return
def fit(self, X, y=None, m_codes=None, **kwargs):
assert isinstance(X, np.ndarray)
if y is not None:
assert isinstance(y, np.ndarray)
X = np.c_[X, y]
tic = default_timer()
self.metadata = self._default_metadata(X)
self._update_metadata(**kwargs)
self.i_list = self.imputer_algorithm(X, self.metadata.get("nominal_attributes"))
# N.b.: `random state` parameter is in `self.sel_cfg`
if m_codes is None:
self.m_codes = self.selection_algorithm(self.metadata, **self.sel_cfg)
else:
self.m_codes = m_codes
self.m_list = self.induction_algorithm(
X,
self.m_codes,
self.metadata,
self.classifier_algorithm,
self.regressor_algorithm,
self.clf_cfg,
self.rgr_cfg,
**self.ind_cfg
)
self._filter_m_list_m_codes()
self._consistent_datastructures()
if self.imputer_algorithm == self.imputer_algorithms.get("nan"):
            # If you do not have imputers, you cannot use them as a baseline evaluation
self.evl_cfg["consider_imputations"] = False
self.m_score = self.evaluation_algorithm(
X, self.m_codes, self.m_list, self.i_list, **self.evl_cfg
)
toc = default_timer()
self.model_data["ind_time"] = toc - tic
self.metadata["n_component_models"] = len(self.m_codes)
return
def predict(
self,
X,
q_code=None,
inference_algorithm=None,
prediction_algorithm=None,
**kwargs
):
# Update configuration if necessary
if q_code is None:
q_code = self._default_q_code()
if inference_algorithm is not None:
self._reconfig_inference(inference_algorithm=inference_algorithm)
if prediction_algorithm is not None:
self._reconfig_prediction(
prediction_algorithm=prediction_algorithm, **kwargs
)
# Adjust data
self.q_code = q_code
self.q_desc_ids, self.q_targ_ids, _ = code_to_query(
self.q_code, return_list=True
)
# Make query-diagram
tic_prediction = default_timer()
self.m_sel = self.prediction_algorithm(
self.m_codes, self.m_fimps, self.m_score, q_code=self.q_code, **self.prd_cfg
)
toc_prediction = default_timer()
tic_diagram = default_timer()
self.q_diagram = self._build_q_diagram(self.m_list, self.m_sel)
toc_diagram = default_timer()
tic_infalgo = default_timer()
if isinstance(self.q_diagram, tuple):
self.q_diagrams = self.q_diagram
# for d in self.q_diagrams:
# print(d.nodes)
# self.c_list.append(self._build_q_model(X, d))
self.c_list = [self._build_q_model(X, d) for d in self.q_diagrams]
self.c_sel = list(range(len(self.c_list)))
self.c_diagram = self._build_q_diagram(
self.c_list, self.c_sel, composition=True
)
self.q_model = self._build_q_model(X, self.c_diagram)
else:
self.q_model = self._build_q_model(X, self.q_diagram)
toc_infalgo = default_timer()
tic_dask = default_timer()
X = X[:, self.q_model.desc_ids]
result = self.q_model.predict(X)
toc_dask = default_timer()
self.model_data["prd_time"] = toc_prediction - tic_prediction
self.model_data["dia_time"] = toc_diagram - tic_diagram
self.model_data["infalgo_time"] = toc_infalgo - tic_infalgo
self.model_data["dsk_time"] = toc_dask - tic_dask
self.model_data["inf_time"] = toc_dask - tic_prediction
return result
def get_params(self, deep=False):
return self.params
# Diagrams
def _build_q_diagram(self, m_list, m_sel, composition=False):
if isinstance(m_sel, tuple):
diagrams = [
build_diagram(
m_list,
m_sel_instance,
self.q_code,
prune=True,
composition=composition,
)
for m_sel_instance in m_sel
]
return tuple(diagrams)
else:
return build_diagram(
m_list, m_sel, self.q_code, prune=True, composition=composition
)
def show_q_diagram(self, kind="svg", fi=False, ortho=False, index=None, **kwargs):
if isinstance(self.q_diagram, tuple) and index is None:
return show_diagram(self.c_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs)
elif isinstance(self.q_diagram, tuple):
return show_diagram(
self.q_diagram[index], kind=kind, fi=fi, ortho=ortho, **kwargs
)
else:
return show_diagram(self.q_diagram, kind=kind, fi=fi, ortho=ortho, **kwargs)
def save_diagram(self, fname=None, kind="svg", fi=False, ortho=False):
return save_diagram(self.q_diagram, fname, kind=kind, fi=fi, ortho=ortho)
# Inference
def _build_q_model(self, X, diagram):
try:
self.inference_algorithm(
diagram,
self.m_list,
self.i_list,
self.c_list,
X,
self.metadata.get("nominal_attributes"),
)
except NetworkXUnfeasible:
cycle = find_cycle(self.q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
n_component_models = self.metadata["n_component_models"]
q_model = NewCompositeModel(
diagram,
nominal_attributes=self.metadata["nominal_attributes"],
n_component_models=n_component_models,
)
return q_model
def _merge_q_models(self, q_models):
q_diagram = build_diagram(self.c_list, self.c_sel, self.q_code, prune=True)
return q_diagram
def merge_models(self, q_models):
types = self._get_types(self.metadata)
walks = [
model_to_graph(m, types, idx=idx, composition=True)
for idx, m in enumerate(q_models)
]
q_diagram = compose_all(walks)
filtered_nodes = self.filter_nodes(q_diagram)
try:
self.inference_algorithm(q_diagram, sorted_nodes=filtered_nodes)
except NetworkXUnfeasible:
cycle = find_cycle(q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
q_model = CompositeModel(q_diagram)
return q_diagram, q_model
def _get_q_model(self, q_diagram, X):
self._add_imputer_function(q_diagram)
try:
self.inference_algorithm(q_diagram, X=X)
except NetworkXUnfeasible:
cycle = find_cycle(q_diagram, orientation="original")
msg = """
Topological sort failed, investigate diagram to debug.
I will never be able to squeeze a prediction out of a diagram with a loop.
Cycle was: {}
""".format(
cycle
)
raise RecursionError(msg)
q_model = CompositeModel(q_diagram)
return q_model
# Filter
def _filter_m_list_m_codes(self):
"""Filtering out the failed models.
This happens when TODO: EXPLAIN
"""
fail_m_idxs = [i for i, m in enumerate(self.m_list) if m is None]
self.m_codes = np.delete(self.m_codes, fail_m_idxs, axis=0)
self.m_list = [m for m in self.m_list if m is not None]
return
# Graphs
def _consistent_datastructures(self, binary_scores=False):
self._update_m_codes()
self._update_m_fimps()
return
def _expand_m_list(self):
self.m_list = list(itertools.chain.from_iterable(self.m_list))
return
def _add_model(self, model, binary_scores=False):
self.m_list.append(model)
self._consistent_datastructures(binary_scores=binary_scores)
return
def _update_m_codes(self):
self.m_codes = np.array(
[
query_to_code(
list(model.desc_ids),
list(model.targ_ids),
attributes=self.metadata["attributes"],
)
for model in self.m_list
]
)
return
def _update_m_fimps(self):
init = np.zeros(self.m_codes.shape)
for m_idx, mod in enumerate(self.m_list):
init[m_idx, list(mod.desc_ids)] = mod.feature_importances_
self.m_fimps = init
return
def _update_m_score(self, binary_scores=False):
if binary_scores:
self.m_score = (self.m_codes == TARG_ENCODING).astype(float)
return
# Imputer
def _add_imputer_function(self, g):
for n in g.nodes:
if g.nodes[n]["kind"] == "imputation":
idx = g.nodes[n]["idx"]
f_1 = self._dummy_array # Artificial input
f_2 = self.i_list[idx].transform # Actual imputation
f_3 = np.ravel # Return a vector, not array
g.nodes[n]["function"] = o(f_3, o(f_2, f_1))
return
# Add ids
@staticmethod
def _add_ids(g, desc_ids, targ_ids):
g.graph["desc_ids"] = set(desc_ids)
g.graph["targ_ids"] = set(targ_ids)
return g
# Metadata
def _default_metadata(self, X):
if X.ndim != 2:
X = X.reshape(-1, 1)
n_rows, n_cols = X.shape
types = [X[0, 0].dtype for _ in range(n_cols)]
nominal_attributes = set(
[att for att, typ in enumerate(types) if self._is_nominal(typ)]
)
numeric_attributes = set(
[att for att, typ in enumerate(types) if self._is_numeric(typ)]
)
metadata = dict(
attributes=set(range(n_cols)),
n_attributes=n_cols,
types=types,
nominal_attributes=nominal_attributes,
numeric_attributes=numeric_attributes,
)
return metadata
def _update_metadata(self, **kwargs):
self._update_dictionary(self.metadata, kind="metadata", **kwargs)
        # Ensure every attribute is typed: any attribute not listed is set to the numeric type (default)
numeric = self.metadata["numeric_attributes"]
nominal = self.metadata["nominal_attributes"]
att_ids = self.metadata["attributes"]
# All attributes should be accounted for and none should be double.
if (len(nominal) + len(numeric) - len(att_ids)) != 0:
numeric = att_ids - nominal
self._update_dictionary(
self.metadata, kind="metadata", numeric_attributes=numeric
)
return
# Configuration
def _reconfig_prediction(self, prediction_algorithm="mi", **kwargs):
self.prediction_algorithm = self.prediction_algorithms[prediction_algorithm]
self.prd_cfg = self._default_config(self.prediction_algorithm)
self.configuration["prediction"] = self.prd_cfg
self._update_config(**kwargs)
return
def _reconfig_inference(self, inference_algorithm="own", **kwargs):
self.inference_algorithm = self.inference_algorithms[inference_algorithm]
self.inf_cfg = self._default_config(self.inference_algorithm)
self.configuration["inference"] = self.inf_cfg
self._update_config(**kwargs)
return
@staticmethod
def _default_config(method):
config = {}
sgn = signature(method)
for key, parameter in sgn.parameters.items():
if parameter.default is not parameter.empty:
config[key] = parameter.default
return config
def _update_config(self, **kwargs):
for kind in self.configuration:
self._update_dictionary(self.configuration[kind], kind=kind, **kwargs)
return
def _extra_checks_on_config(self):
self._check_xgb_single_target()
return
def _check_xgb_single_target(self):
nb_targets = self.configuration["selection"]["nb_targets"]
if nb_targets == 1:
return None
else:
if (
self.classifier_algorithm is self.classifier_algorithms["XGB"]
or self.regressor_algorithm is self.regressor_algorithms["XGB"]
):
xgb = True
else:
xgb = False
if xgb:
msg = """
XGBoost cannot deal with multi-target outputs.
Hence, the `nb_targets` parameter is automatically adapted to 1,
so only single-target trees will be learned.
Please take this into account.
"""
warnings.warn(msg)
self.configuration["selection"]["nb_targets"] = 1
return
def _parse_kwargs(self, kind="selection", **kwargs):
prefixes = [e + self.delimiter for e in self.configuration_prefixes[kind]]
parameter_map = {
x.split(prefix)[1]: x
for x in kwargs
for prefix in prefixes
if x.startswith(prefix)
}
return parameter_map
def _update_dictionary(self, dictionary, kind=None, **kwargs):
# Immediate matches
overlap = set(dictionary).intersection(set(kwargs))
for k in overlap:
dictionary[k] = kwargs[k]
if kind is not None:
# Parsed matches
parameter_map = self._parse_kwargs(kind=kind, **kwargs)
overlap = set(dictionary).intersection(set(parameter_map))
for k in overlap:
dictionary[k] = kwargs[parameter_map[k]]
return
# Helpers
def _filter_X(self, X):
# Filter relevant input attributes
if X.shape[1] != len(self.q_compose.desc_ids):
indices = self._overlapping_indices(
self.q_desc_ids, self.q_compose.desc_ids
)
return X[:, indices]
@staticmethod
def _dummy_array(X):
"""
Return an array of np.nan, with the same number of rows as the input array.
Parameters
----------
X: np.ndarray(), n_rows, n_cols = X.shape,
We use the shape of X to deduce shape of our output.
Returns
-------
a: np.ndarray(), shape= (n_rows, 1)
n_rows is the same as the number of rows as X.
"""
n_rows, _ = X.shape
a = np.empty((n_rows, 1))
a.fill(np.nan)
return a
def _default_q_code(self):
        q_code = np.zeros(self.metadata["n_attributes"])
import numpy as np
import matplotlib.pyplot as plt
import math
color = ['green', 'darkred', 'skyblue']
degree = [5, 10, 14]
one = np.ones(15)
noise = np.random.normal(loc=0.0, scale=0.04, size=20)
x = np.linspace(0, 1, 20)
x = np.random.permutation(x)
#print("{}".format( x ))
# training error and testing error
for k in range(len(degree)):
testX = x[15:]
X = np.vstack([ np.power(x[:15], degree[k]), np.power(x[:15], degree[k]-1) ])
for i in range(degree[k]-2):
X = np.vstack([ X, np.power(x[:15], degree[k]-2-i) ])
X = np.vstack([ X, one])
#print("{}".format( noise ))
#print("{}".format( x ))
#print("{}".format( one ))
#print("{}".format( X ))
#print("{}".format( X.shape ))
#print("{}".format( testX ))
y = np.add(np.sin(np.pi * x[:15] * 2), noise[:15])
#print("{}".format( y ))
testY = np.add(np.sin(np.pi * x[15:] * 2), noise[15:])
#print("{}".format( testY ))
Wlin = np.linalg.inv(X.dot(X.T)).dot(X).dot(y.T)
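    # Wlin is the ordinary least-squares solution of the normal equations,
    # w = (X X^T)^{-1} X y, for the design matrix whose rows are x^k, x^(k-1), ..., 1.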
#print("{}".format( Wlin ))
c = 0
for i in range(degree[k]):
c += np.power(x[:15], degree[k]-i) * Wlin[i]
c += Wlin[degree[k]]
d = 0
for i in range(degree[k]):
d += np.power(x[15:], degree[k]-i) * Wlin[i]
d += Wlin[degree[k]]
trainError = math.pow( np.square(np.subtract( c, y )).mean(), 1/2 )
print("training error with degree {D} : {T}".format( D=degree[k],T=trainError ))
testError = math.pow( np.square(np.subtract( d, testY )).mean(), 1/2 )
print("testing errors with degree {D} : {T}".format( D=degree[k],T=testError ))
a = np.linspace(0, 1, 1000)
b = 0
for i in range(degree[k]):
b += np.power(a, degree[k]-i) * Wlin[i]
b += Wlin[degree[k]]
#b = np.power(a, 5) * Wlin[0] + np.power(a, 4) * Wlin[1] + np.power(a, 3) * Wlin[2] + np.power(a, 2) * Wlin[3] + a * Wlin[4] + Wlin[5]
plt.plot(a, b, color = color[k])
plt.scatter(x[:15], y, color = 'blue')
plt.scatter(x[15:], testY, color = 'yellow')
plt.ylim(-2, 2)
plt.title('Fitting plots for training error and testing error with degree - {}'.format(degree))
plt.xlabel("trainingPoint : blue , testingPoint : yellow\ngreen : degree-5 , darkred : degree-10 , skyblue : degree-14")
plt.show()
# cross-validation errors ( leave-one-out )
for k in range(len(degree)):
sum = 0
for i in range(15):
tmpX = np.delete(x[:15], i)
validX = x[i]
X = np.vstack([ np.power(tmpX, degree[k]), np.power(tmpX, degree[k]-1) ])
for j in range(degree[k]-2):
X = np.vstack([ X, np.power(tmpX, degree[k]-2-j) ])
X = np.vstack([ X, one[:14]])
#print("{}".format( tmpX ))
#print("{}".format( validX ))
#print("{}".format( X ))
tmpY = np.add(np.sin(np.pi * tmpX * 2), np.delete(noise[:15], i) )
#print("{}".format( tmpY ))
validY = np.add(np.sin(np.pi * validX * 2), noise[i])
#print("{}".format( validY ))
Wlin = np.linalg.inv(X.dot(X.T)).dot(X).dot(tmpY.T)
#print("{}".format( Wlin ))
d = 0
for j in range(degree[k]):
d += np.power(validX, degree[k]-j) * Wlin[j]
d += Wlin[degree[k]]
crossError = np.abs(np.subtract( d, validY ))
#print("{}".format( crossError ))
sum += crossError
a = np.linspace(0, 1, 1000)
b = 0
for i in range(degree[k]):
        b += np.power(a, degree[k]-i) * Wlin[i]
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp
def make_data_loader(cfg, is_train, is_distributed=False, start_iter=0):
scale = cfg.SPARSE3D.VOXEL_SCALE
full_scale=cfg.SPARSE3D.VOXEL_FULL_SCALE
val_reps = cfg.SPARSE3D.VAL_REPS
batch_size = cfg.SOLVER.IMS_PER_BATCH
dimension=3
    # VALID_CLASS_IDS have been mapped to the range {0,1,...,19}
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
def get_files(split):
import os
cur_path = os.path.dirname(os.path.abspath(__file__))
dset_path = f'{cur_path}/ScanNetTorch'
with open(f'{cur_path}/Benchmark_Small/scannetv1_{split}.txt') as f:
scene_names = [l.strip() for l in f.readlines()]
files = [f'{dset_path}/{scene}/{scene}_vh_clean_2.pth' for scene in scene_names]
return files
train,val=[],[]
for x in torch.utils.data.DataLoader(
get_files('train'),
collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
train.append(x)
for x in torch.utils.data.DataLoader(
get_files('val'),
collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
val.append(x)
print('Training examples:', len(train))
print('Validation examples:', len(val))
#Elastic distortion
blur0=np.ones((3,1,1)).astype('float32')/3
blur1=np.ones((1,3,1)).astype('float32')/3
    blur2=np.ones((1,1,3)).astype('float32')/3
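    # A sketch (the original excerpt truncates here) of how these separable blur
    # kernels are typically used for elastic distortion, following the SparseConvNet
    # ScanNet example this file is based on: random noise is smoothed along each axis,
    # interpolated at the point coordinates, and added as a displacement field.
    # The `gran`/`mag` arguments and this exact helper are assumptions for illustration.
    import scipy.interpolate
    def elastic(x, gran, mag):
        bb = np.abs(x).max(0).astype(np.int32) // gran + 3
        noise = [np.random.randn(bb[0], bb[1], bb[2]).astype('float32') for _ in range(3)]
        for blur in [blur0, blur1, blur2, blur0, blur1, blur2]:
            noise = [scipy.ndimage.filters.convolve(n, blur, mode='constant', cval=0) for n in noise]
        ax = [np.linspace(-(b - 1) * gran, (b - 1) * gran, b) for b in bb]
        interp = [scipy.interpolate.RegularGridInterpolator(ax, n, bounds_error=0, fill_value=0) for n in noise]
        def g(x_):
            return np.hstack([i(x_)[:, None] for i in interp])
        return x + g(x) * mag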
try:
from schwimmbad import SerialPool, MultiPool
from functools import partial
except:
print('running without parallelization.')
import numpy as np
import os
import time
from astropy.table import Table
import time
import pylab as pl
from IPython import display
from .gp_sfh import *
from .sed_fitter import *
from .pre_grid import *
from .priors import *
def gen_pg_parallel(data_i, atlas_vals):
fname, zval, priors, pg_folder, filter_list, filt_dir, N_pregrid = atlas_vals
fname_full = fname + '_zval_%.0f_' %(zval*10000) + '_chunk_%.0f' %(data_i)
generate_atlas(N_pregrid = N_pregrid,
priors = priors,
fname = fname_full, store=True, path=pg_folder,
filter_list = filter_list, filt_dir = filt_dir,
rseed = (N_pregrid * data_i + 1))
return
def generate_atlas_in_parallel_chunking(zval, chunksize, nchunks, fname = 'temp_parallel_atlas', filter_list = 'filter_list_goodss.dat', filt_dir = 'internal', priors = [], z_bw = 0.05, pg_folder = 'parallel_atlases/'):
N_pregrid = chunksize
atlas_vals = [fname, zval, priors, pg_folder, filter_list, filt_dir, N_pregrid]
time_start = time.time()
try:
with MultiPool() as pool:
            values = list(pool.map(partial(gen_pg_parallel, atlas_vals=atlas_vals), range(nchunks)))
finally:
        print('Generated pregrid (%.0f chunks of %.0f SEDs each) at zval = %.4f' % (nchunks, chunksize, zval))
print('time taken: %.2f mins.' %((time.time()-time_start)/60))
# need to add code here to then concatenate chunks into a single file and delete the individual ones
return
def make_atlas_parallel(zval, atlas_params):
"""
Make a single atlas given a redshift value and a list of parameters (including a priors object).
Atlas Params: [N_pregrid, priors, fname, store, path, filter_list, filt_dir, z_bw]
"""
# currently only works for photometry, change to include a variable list of atlas_kwargs
N_pregrid, priors, fname, store, path, filter_list, filt_dir, z_bw = atlas_params
priors.z_max = zval + z_bw/2
priors.z_min = zval - z_bw/2
fname = fname+'_zval_%.0f_' %(zval*10000)
generate_atlas(N_pregrid = N_pregrid,
priors = priors,
fname = fname, store=True, path=path,
filter_list = filter_list, filt_dir = filt_dir,
rseed = int(zval*100000))
return
def generate_atlas_in_parallel_zgrid(zgrid, atlas_params, dynamic_decouple = True):
"""
Make a set of atlases given a redshift grid and a list of parameters (including a priors object).
Atlas Params: [N_pregrid, priors, fname, store, path, filter_list, filt_dir, z_bw]
"""
time_start = time.time()
try:
with MultiPool() as pool:
values = list(pool.map(partial(make_atlas_parallel, atlas_params = atlas_params), zgrid))
finally:
time_end = time.time()
print('time taken [parallel]: %.2f min.' %((time_end-time_start)/60))
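# A minimal usage sketch (not part of the original module); the priors object and file
# names below are illustrative assumptions matching the "Atlas Params" docstring above:
#   priors = Priors()
#   zgrid = np.arange(0.1, 2.0, 0.05)
#   atlas_params = [10000, priors, 'temp_atlas', True, 'pregrids/',
#                   'filter_list_goodss.dat', 'internal', 0.05]
#   generate_atlas_in_parallel_zgrid(zgrid, atlas_params)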
#------------------------------------------------------------------------------------
def fit_gals(gal_id, catvals):
#if not fit_mask:
if len(catvals) == 3:
cat_seds, cat_errs, atlas = catvals
fit_mask = []
elif len(catvals) == 4:
cat_seds, cat_errs, fit_mask, atlas = catvals
else:
print('wrong number of arguments supplied to fitter')
gal_sed = cat_seds[gal_id, 0:].copy()
gal_err = cat_errs[gal_id, 0:].copy()
#gal_err = cat_errs[gal_id, 0:].copy() + gal_sed*0.03
#gal_err = cat_errs[gal_id, 0:].copy() + gal_sed*0.1
#gal_err = cat_errs[gal_id, 0:].copy() + gal_sed*0.5
fit_likelihood, fit_norm_fac = evaluate_sed_likelihood(gal_sed,gal_err,atlas,fit_mask=fit_mask,
zbest=None,deltaz=None)
quants = get_quants(fit_likelihood, atlas, fit_norm_fac)
return quants, fit_likelihood
# try:
# map_mstar = evaluate_MAP(atlas['mstar']+np.log10(fit_norm_fac),
# np.exp(-fit_likelihood/2),
# bins = np.arange(4,14,0.001),
# smooth = 'kde', lowess_frac = 0.3, vb = False)
# map_sfr = evaluate_MAP(atlas['sfr']+np.log10(fit_norm_fac),
# np.exp(-fit_likelihood/2),
# bins = np.arange(-6,4,0.001),
# smooth = 'kde', lowess_frac = 0.3, vb = False)
# return quants, fit_likelihood, map_mstar, map_sfr
# except:
# print('couldnt calculate MAP for galid: ',gal_id)
# return quants, fit_likelihood, np.nan, np.nan
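# As read back in fit_catalog below, `quants` packs percentile estimates as
# quants[0]=logM, quants[1]=logSFRinst, quants[2]=Av, quants[3]=logZsol and
# quants[4]=zfit (each ordered [50th, 16th, 84th]), while quants[5] holds derived
# quantities [logMt, logSFR100, nparam, t25, t50, t75] for each percentile.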
def fit_catalog(fit_cat, atlas_path, atlas_fname, output_fname, N_pregrid = 10000, N_param = 3, z_bw = 0.05, f160_cut = 100, fit_mask = [], zgrid = [], sfr_uncert_cutoff = 2.0):
cat_id, cat_zbest, cat_seds, cat_errs, cat_f160, cat_class_star = fit_cat
#if not zgrid:
    if not isinstance(zgrid, np.ndarray):
zgrid = np.arange(np.amin(cat_zbest),np.amax(cat_zbest),z_bw)
fit_id = cat_id.copy()
fit_logM_50 = np.zeros_like(cat_zbest)
fit_logM_MAP = np.zeros_like(cat_zbest)
fit_logM_16 = np.zeros_like(cat_zbest)
fit_logM_84 = np.zeros_like(cat_zbest)
fit_logSFRinst_50 = np.zeros_like(cat_zbest)
fit_logSFRinst_MAP = np.zeros_like(cat_zbest)
fit_logSFRinst_16 = np.zeros_like(cat_zbest)
fit_logSFRinst_84 = np.zeros_like(cat_zbest)
fit_logZsol_50 = np.zeros_like(cat_zbest)
fit_logZsol_16 = np.zeros_like(cat_zbest)
fit_logZsol_84 = np.zeros_like(cat_zbest)
fit_Av_50 = np.zeros_like(cat_zbest)
fit_Av_16 = np.zeros_like(cat_zbest)
fit_Av_84 = np.zeros_like(cat_zbest)
fit_zfit_50 = np.zeros_like(cat_zbest)
fit_zfit_16 = np.zeros_like(cat_zbest)
fit_zfit_84 = np.zeros_like(cat_zbest)
fit_logMt_50 = np.zeros_like(cat_zbest)
fit_logMt_16 = np.zeros_like(cat_zbest)
fit_logMt_84 = np.zeros_like(cat_zbest)
fit_logSFR100_50 = np.zeros_like(cat_zbest)
fit_logSFR100_16 = np.zeros_like(cat_zbest)
fit_logSFR100_84 = np.zeros_like(cat_zbest)
fit_nparam = np.zeros_like(cat_zbest)
fit_t25_50 = np.zeros_like(cat_zbest)
fit_t25_16 = np.zeros_like(cat_zbest)
fit_t25_84 = np.zeros_like(cat_zbest)
fit_t50_50 = np.zeros_like(cat_zbest)
fit_t50_16 = np.zeros_like(cat_zbest)
fit_t50_84 = np.zeros_like(cat_zbest)
fit_t75_50 = np.zeros_like(cat_zbest)
fit_t75_16 = np.zeros_like(cat_zbest)
fit_t75_84 = np.zeros_like(cat_zbest)
fit_nbands = np.zeros_like(cat_zbest)
fit_f160w = np.zeros_like(cat_zbest)
fit_stellarity = np.zeros_like(cat_zbest)
fit_chi2 = np.zeros_like(cat_zbest)
fit_flags = np.zeros_like(cat_zbest)
for i in (range(len(zgrid))):
print('loading atlas at', zgrid[i])
# for a given redshift slice,
zval = zgrid[i]
# select the galaxies to be fit
z_mask = (cat_zbest < (zval + z_bw/2)) & (cat_zbest > (zval - z_bw/2)) & (cat_f160 < f160_cut)
fit_ids = np.arange(len(cat_zbest))[z_mask]
# for gal_id in fit_ids:
# gal_sed = cat_seds[gal_id, 0:]
# gal_err = cat_errs[gal_id, 0:]
# fit_likelihood, fit_norm_fac = evaluate_sed_likelihood(gal_sed,gal_err,atlas,fit_mask=[],
# zbest=None,deltaz=None)
# quants = get_quants(fit_likelihood, atlas, fit_norm_fac)
print('starting parallel fitting for Ngals = ',len(fit_ids),' at redshift ', str(zval))
try:
# load the atlas
fname = atlas_fname+'_zval_%.0f_' %(zgrid[i]*10000)
atlas = load_atlas(fname, N_pregrid, N_param = N_param, path = atlas_path)
print('loaded atlas')
with MultiPool() as pool:
# note: Parallel doesn't work in Python2.6
# if not fit_mask:
                if not isinstance(fit_mask, np.ndarray):
all_quants = list(pool.map(partial(fit_gals, catvals=(cat_seds, cat_errs, atlas)), fit_ids))
else:
all_quants = list(pool.map(partial(fit_gals, catvals=(cat_seds, cat_errs, fit_mask, atlas)), fit_ids))
print('finished fitting parallel zbest chunk at z=%.3f' %zval)
print('starting to put values in arrays')
for ii, gal_id in enumerate(fit_ids):
gal_sed = cat_seds[gal_id, 0:]
gal_err = cat_errs[gal_id, 0:]
quants = all_quants[ii][0]
fit_likelihood = all_quants[ii][1]
# fit_logM_MAP[gal_id] = all_quants[ii][2]
# fit_logSFRinst_MAP[gal_id] = all_quants[ii][3]
fit_logM_50[gal_id] = quants[0][0]
fit_logM_16[gal_id] = quants[0][1]
fit_logM_84[gal_id] = quants[0][2]
fit_logSFRinst_50[gal_id] = quants[1][0]
fit_logSFRinst_16[gal_id] = quants[1][1]
fit_logSFRinst_84[gal_id] = quants[1][2]
fit_Av_50[gal_id] = quants[2][0]
fit_Av_16[gal_id] = quants[2][1]
fit_Av_84[gal_id] = quants[2][2]
fit_logZsol_50[gal_id] = quants[3][0]
fit_logZsol_16[gal_id] = quants[3][1]
fit_logZsol_84[gal_id] = quants[3][2]
fit_zfit_50[gal_id] = quants[4][0]
fit_zfit_16[gal_id] = quants[4][1]
fit_zfit_84[gal_id] = quants[4][2]
fit_logMt_50[gal_id] = quants[5][0][0]
fit_logMt_16[gal_id] = quants[5][1][0]
fit_logMt_84[gal_id] = quants[5][2][0]
fit_logSFR100_50[gal_id] = quants[5][0][1]
fit_logSFR100_16[gal_id] = quants[5][1][1]
fit_logSFR100_84[gal_id] = quants[5][2][1]
fit_nparam[gal_id] = quants[5][0][2]
fit_t25_50[gal_id] = quants[5][0][3]
fit_t25_16[gal_id] = quants[5][1][3]
fit_t25_84[gal_id] = quants[5][2][3]
fit_t50_50[gal_id] = quants[5][0][4]
fit_t50_16[gal_id] = quants[5][1][4]
fit_t50_84[gal_id] = quants[5][2][4]
fit_t75_50[gal_id] = quants[5][0][5]
fit_t75_16[gal_id] = quants[5][1][5]
fit_t75_84[gal_id] = quants[5][2][5]
fit_nbands[gal_id] = np.sum(gal_sed>0)
fit_f160w[gal_id] = cat_f160[gal_id]
fit_stellarity[gal_id] = cat_class_star[gal_id]
fit_chi2[gal_id] = np.amin(fit_likelihood)
# flagging galaxies that either
# 1. have nan values for mass
# 2. have SFR uncertainties > sfr_uncert_cutoff
# 3. are flagged as a star
# 4. have extremely large chi2
            if np.isnan(quants[0][0]):
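                # (sketch) The original excerpt truncates inside this branch; the
                # assignment below is an assumption consistent with the flagging
                # criteria listed above -- the flag value 1.0 is illustrative:
                fit_flags[gal_id] = 1.0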
__package__ = "valkyrie_gym_env"
import scipy
import os, inspect, time
from pybullet_utils.bullet_client import BulletClient
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
# from rllab.envs.pybullet.valkyrie_multi_env.utils.util import quat_to_rot, rotX, rotY, rotZ
from b3px_gym.b3px_env.parallel.util import quat_to_rot, rotX, rotY, rotZ
import gym
import copy
from gym import spaces
from gym.utils import seeding
import numpy as np
import time
import pybullet as pybullet
import math
from b3px_gym.b3px_env.singleton.valkyrie_gym_env.filter import FilterClass, KalmanFilter, BinaryFilter
from b3px_gym.b3px_env.singleton.valkyrie_gym_env.PD_controller import PDController
from b3px_gym.b3px_env.singleton.valkyrie_gym_env.sensor_signal_process import calCOP
class Valkyrie(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
# def __del__(self):
# self._p.disconnect()
def __init__(self,
max_time=16, # in seconds
initial_gap_time=0.01, # in seconds
isEnableSelfCollision=False,
renders=False,
Kp_scale=None,
Kd_scale=None,
planeId=None,
fixed_base=False,
time_step=0.005,
frame_skip=8,
scaling_factor = 1.0,
margin_in_degree=1000., # if > 90. then read from urdf
useFullDOF=True,
regularise_action=True
):
self.Kp_scale = dict([
("rightShoulderPitch", 1),
("rightShoulderRoll", 1),
("rightShoulderYaw", 1),
("rightElbowPitch", 1),
("leftShoulderPitch", 1),
("leftShoulderRoll", 1),
("leftShoulderYaw", 1),
("leftElbowPitch", 1),
("torsoYaw", 1),
("torsoPitch", 1),
("torsoRoll", 1),
("leftHipYaw", 1),
("leftHipRoll", 1),
("leftHipPitch", 1),
("leftKneePitch", 2),
("leftAnklePitch", 2),
("leftAnkleRoll", 1),
("rightHipYaw", 1),
("rightHipRoll", 1),
("rightHipPitch", 1),
("rightKneePitch", 2),
("rightAnklePitch", 2),
("rightAnkleRoll", 1),
]) if Kp_scale is None else Kp_scale
self.Kd_scale = dict([
("rightShoulderPitch", 1),
("rightShoulderRoll", 1),
("rightShoulderYaw", 1),
("rightElbowPitch", 1),
("leftShoulderPitch", 1),
("leftShoulderRoll", 1),
("leftShoulderYaw", 1),
("leftElbowPitch", 1),
("torsoYaw", 1),
("torsoPitch", 1),
("torsoRoll", 1),
("leftHipYaw", 1),
("leftHipRoll", 1),
("leftHipPitch", 1),
("leftKneePitch", 1),
("leftAnklePitch", 1),
("leftAnkleRoll", 1),
("rightHipYaw", 1),
("rightHipRoll", 1),
("rightHipPitch", 1),
("rightKneePitch", 1),
("rightAnklePitch", 1),
("rightAnkleRoll", 1),
]) if Kd_scale is None else Kd_scale
self.regularise_action = regularise_action
self.useFullDOF= useFullDOF
self.timestep = time_step
self.frame_skip=frame_skip
self.robot_loaded = False
self.fixed_base = fixed_base
self.planeID = planeId
self.jointIdx = {}
self.jointNameIdx = {}
self.jointLowerLimit = []
self.jointUpperLimit = []
self.total_mass = 0.0
# self._p = p
# self._seed()
self._envStepCounter = 0
self._renders = renders
self._p = BulletClient(connection_mode=pybullet.DIRECT) if not renders else BulletClient(connection_mode=pybullet.GUI)
if self.useFullDOF:
self.controlled_joints = [
"rightShoulderPitch",
"rightShoulderRoll",
"rightShoulderYaw",
"rightElbowPitch",
"leftShoulderPitch",
"leftShoulderRoll",
"leftShoulderYaw",
"leftElbowPitch",
"torsoYaw",
"torsoPitch",
"torsoRoll",
"rightHipYaw",
"rightHipRoll",
"rightHipPitch",
"rightKneePitch",
"rightAnklePitch",
"rightAnkleRoll",
"leftHipYaw",
"leftHipRoll",
"leftHipPitch",
"leftKneePitch",
"leftAnklePitch",
"leftAnkleRoll",
]
else:
self.controlled_joints = [
"rightHipPitch",
"rightKneePitch",
"rightAnklePitch",
"leftHipPitch",
"leftKneePitch",
"leftAnklePitch", ]
self.nu = len(self.controlled_joints)
self.r = -1
self.PD_freq = 1/(self.timestep*self.frame_skip)
self.Physics_freq = 1/self.timestep
self._actionRepeat = int(self.Physics_freq/self.PD_freq)
self._dt_physics = (1./ self.Physics_freq)
self._dt_PD = (1. / self.PD_freq)
self._dt = self._dt_physics # PD control loop timestep
self._dt_filter = self._dt_PD #filter time step
self.g = 9.81
self.joint_states = {}
self.u_max = dict([("torsoYaw", 190),
("torsoPitch", 150),
("torsoRoll", 150),
("rightShoulderPitch", 190),
("rightShoulderRoll", 190),
("rightShoulderYaw", 65),
("rightElbowPitch", 65),
("rightForearmYaw", 26),
("rightWristRoll", 14),
("rightWristPitch", 14),
("leftShoulderPitch", 190),
("leftShoulderRoll", 190),
("leftShoulderYaw", 65),
("leftElbowPitch", 65),
("leftForearmYaw", 26),
("leftWristRoll", 14),
("leftWristPitch", 14),
("rightHipYaw", 190),
("rightHipRoll", 350),
("rightHipPitch", 350),
("rightKneePitch", 350),
("rightAnklePitch", 205),
("rightAnkleRoll", 205),
("leftHipYaw", 190),
("leftHipRoll", 350),
("leftHipPitch", 350),
("leftKneePitch", 350),
("leftAnklePitch", 205),
("leftAnkleRoll", 205),
("lowerNeckPitch", 50),
("upperNeckPitch", 50),
("neckYaw", 50)])
self.v_max = dict([("torsoYaw", 5.89),
("torsoPitch", 9),
("torsoRoll", 9),
("rightShoulderPitch", 5.89),
("rightShoulderRoll", 5.89),
("rightShoulderYaw", 11.5),
("rightElbowPitch", 11.5),
("leftShoulderPitch", 5.89),
("leftShoulderRoll", 5.89),
("leftShoulderYaw", 11.5),
("leftElbowPitch", 11.5),
("rightHipYaw", 5.89),
("rightHipRoll", 7),
("rightHipPitch", 6.11),
("rightKneePitch", 6.11),
("rightAnklePitch", 11),
("rightAnkleRoll", 11),
("leftHipYaw", 5.89),
("leftHipRoll", 7),
("leftHipPitch", 6.11),
("leftKneePitch", 6.11),
("leftAnklePitch", 11),
("leftAnkleRoll", 11),
("lowerNeckPitch", 5),
("upperNeckPitch", 5),
("neckYaw", 5)])
# nominal joint configuration
# self.q_nom = dict([("torsoYaw", 0.0),
# ("torsoPitch", 0.0),
# ("torsoRoll", 0.0),
# ("lowerNeckPitch", 0.0),
# ("neckYaw", 0.0),
# ("upperNeckPitch", 0.0),
# ("rightShoulderPitch", 0.300196631343),
# ("rightShoulderRoll", 1.25),
# ("rightShoulderYaw", 0.0),
# ("rightElbowPitch", 0.785398163397),
# ("leftShoulderPitch", 0.300196631343),
# ("leftShoulderRoll", -1.25),
# ("leftShoulderYaw", 0.0),
# ("leftElbowPitch", -0.785398163397),
# ("rightHipYaw", 0.0),
# ("rightHipRoll", 0.0),
# ("rightAnkleRoll", 0.0),
# ("leftHipYaw", 0.0),
# ("leftHipRoll", 0.0),
# # ("rightHipPitch", -0.49), # -0.49
# # ("rightKneePitch", 1.205), # 1.205
# # ("rightAnklePitch", -0.71), # -0.71
# # ("leftHipPitch", -0.49), # -0.49
# # ("leftKneePitch", 1.205), # 1.205
# # ("leftAnklePitch", -0.71), # -0.71
# ("rightHipPitch", -0.49*0.5), # -0.49
# ("rightKneePitch", 1.205*0.5), # 1.205
# ("rightAnklePitch", -0.71*0.5), # -0.71
# ("leftHipPitch", -0.49*0.5), # -0.49
# ("leftKneePitch", 1.205*0.5), # 1.205
# ("leftAnklePitch", -0.71*0.5), # -0.71
# ("leftAnkleRoll", 0.0)])
if self.useFullDOF:
self.q_nom = dict([
("rightHipYaw", 0.0),
("rightHipRoll", -0.1),
("rightHipPitch", -0.45*1.), # -0.49
("rightKneePitch", 0.944*1.), # 1.205
("rightAnklePitch", -0.527*1.), # -0.71
("rightAnkleRoll", 0.1),
("leftHipYaw", 0.0),
("leftHipRoll", 0.1),
("leftHipPitch", -0.45*1.), # -0.49
("leftKneePitch", 0.944*1.), # 1.205
("leftAnklePitch", -0.527*1.), # -0.71
("leftAnkleRoll", -0.1),
("torsoYaw", 0.0),
("torsoPitch", 0.0),
("torsoRoll", 0.0),
("rightShoulderPitch", 0.300196631343),
("rightShoulderRoll", 1.25),
("rightShoulderYaw", 0.0),
("rightElbowPitch", 0.785398163397),
("leftShoulderPitch", 0.300196631343),
("leftShoulderRoll", -1.25),
("leftShoulderYaw", 0.0),
("leftElbowPitch", -0.785398163397),
])
else:
self.q_nom = dict([
("rightHipPitch", -0.45*1.), # -0.49 , -0.49
("rightKneePitch", 0.944*1.), # 1.205 , 1.205
("rightAnklePitch", -0.527*1.), # -0.71 , -0.8
("leftHipPitch", -0.45*1.), # -0.49 , -0.49
("leftKneePitch", 0.944*1.), # 1.205 , 1.205
("leftAnklePitch", -0.527*1.), # -0.71 , -0.8
])
margin = margin_in_degree*3.14/180
# print("MARGIN IN DEG: %.1f" % margin_in_degree)
self.margin = margin
if self.useFullDOF:
self.joint_limits_low = {
"rightHipYaw": -margin+self.q_nom["rightHipYaw"],
"rightHipRoll": -margin+self.q_nom["rightHipRoll"],
"rightHipPitch": -margin+self.q_nom["rightHipPitch"],
"rightKneePitch": -margin+self.q_nom["rightKneePitch"],
"rightAnklePitch": -margin+self.q_nom["rightAnklePitch"],
"rightAnkleRoll": -margin+self.q_nom["rightAnkleRoll"],
"leftHipYaw": -margin+self.q_nom["leftHipYaw"],
"leftHipRoll": -margin+self.q_nom["leftHipRoll"],
"leftHipPitch": -margin+self.q_nom["leftHipPitch"],
"leftKneePitch": -margin+self.q_nom["leftKneePitch"],
"leftAnklePitch": -margin+self.q_nom["leftAnklePitch"],
"leftAnkleRoll": -margin+self.q_nom["leftAnkleRoll"] }
self.joint_limits_high = {
"rightHipYaw": +margin+self.q_nom["rightHipYaw"],
"rightHipRoll": +margin+self.q_nom["rightHipRoll"],
"rightHipPitch": +margin+self.q_nom["rightHipPitch"],
"rightKneePitch": +margin+self.q_nom["rightKneePitch"],
"rightAnklePitch": +margin+self.q_nom["rightAnklePitch"],
"rightAnkleRoll": +margin+self.q_nom["rightAnkleRoll"],
"leftHipYaw": +margin+self.q_nom["leftHipYaw"],
"leftHipRoll": +margin+self.q_nom["leftHipRoll"],
"leftHipPitch": +margin+self.q_nom["leftHipPitch"],
"leftKneePitch": +margin+self.q_nom["leftKneePitch"],
"leftAnklePitch": +margin+self.q_nom["leftAnklePitch"],
"leftAnkleRoll": +margin+self.q_nom["leftAnkleRoll"]}
else:
self.joint_limits_low = {
"rightHipPitch": -margin+self.q_nom["rightHipPitch"],
"rightKneePitch": -margin+self.q_nom["rightKneePitch"],
"rightAnklePitch": -margin+self.q_nom["rightAnklePitch"],
"leftHipPitch": -margin+self.q_nom["leftHipPitch"],
"leftKneePitch": -margin+self.q_nom["leftKneePitch"],
"leftAnklePitch": -margin+self.q_nom["leftAnklePitch"],
}
self.joint_limits_high = {
"rightHipPitch": +margin+self.q_nom["rightHipPitch"],
"rightKneePitch": +margin+self.q_nom["rightKneePitch"],
"rightAnklePitch": +margin+self.q_nom["rightAnklePitch"],
"leftHipPitch": +margin+self.q_nom["leftHipPitch"],
"leftKneePitch": +margin+self.q_nom["leftKneePitch"],
"leftAnklePitch": +margin+self.q_nom["leftAnklePitch"],
}
self.linkCOMPos = {}
self.linkMass = {}
offset = 1. if self.fixed_base else 0.
self.base_pos_nom = np.array([0, 0, 1.08+offset]) # 1.175 straight #1.025 bend
self.base_orn_nom = np.array([0, 0, 0, 1]) # x,y,z,w
self.plane_pos_nom = np.array([0.,0.,0.])
self.plane_orn_nom = np.array([0.,0.,0.,1.])
self._setupSimulation()
error_for_full_torque = 10*np.pi/180.
kd_fraction = 1/100.
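        # Gain sizing: Kp is chosen so that a 10-degree tracking error commands the
        # joint's maximum torque u_max, and Kd is set to 1/100 of Kp (before the
        # per-joint Kp/Kd scale factors are applied).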
self.Kp = {}
self.Kd = {}
for name in self.controlled_joints:
val = self.u_max[name]/error_for_full_torque
self.Kp.update({name: val*self.Kp_scale[name]})
self.Kd.update({name: val*kd_fraction*self.Kd_scale[name]})
print("%s: %.1f" % (name, val))
self.q_nom_list = []
for jointName in self.controlled_joints:
self.q_nom_list.append(self.q_nom[jointName])
self.q_nom_list = np.array(self.q_nom_list)
self.action = self.q_nom_list
self._actionDim = len(self.controlled_joints)
observationDim = 9+self._actionDim
observation_high = np.array([np.finfo(np.float32).max] * observationDim)
self.observation_space = spaces.Box(-observation_high, observation_high)
self._observationDim = observationDim
self.action_space = spaces.Box(np.array(-self.joint_increment), np.array(self.joint_increment))
# print("Action space: ", self.action_space.low)
self.getLinkMass()
# print("observationDim", self._observationDim, "actionDim", self._actionDim)
def getLinkMass(self):
self.total_mass = 0
info = self._p.getDynamicsInfo(self.r, -1) # for base link
self.linkMass.update({"base": info[0]})
# self.linkMass.update({"pelvisBase": info[0]})
self.total_mass += info[0]
for key, value in self.jointIdx.items():
info = self._p.getDynamicsInfo(self.r, value)
self.linkMass.update({key: info[0]})
self.total_mass += info[0]
def step(self, action):
# clip action
# print("raw action", action)
action = np.clip(action, self.action_space.low, self.action_space.high)
# print("Clipped action", action)
# print("low", self.action_space.low)
# print("high", self.action_space.high)
self.action += action
self.action = np.clip(self.action, self.jointLowerLimit, self.jointUpperLimit)
# self.action = self.getJacobian([0.0, 0.2, 1.1])
# need to clip and perform jacobian here! Filter in loop
for _ in range(self._actionRepeat):
self.set_pos(self.action)
# action = self.getFilteredAction(raw_action)
# _ = self.getObservation() # filter is running in here. Thus call getObservation()
self._p.stepSimulation()
self._observation = self.getExtendedObservation()
reward = copy.copy(self.getReward())
done = copy.copy(self.checkFall())
return copy.copy(self._observation), reward, done, {}
def render(self, mode='human', close=False, distance=3, yaw=0, pitch=-30, roll=0, ):
# p.addUserDebugLine(self.COM_pos + np.array([0, 0, 2]), self.COM_pos + np.array([0, 0, -2]), [1, 0, 0], 5,
# 0.1) # TODO rendering to draw COM
# p.addUserDebugLine(self.support_polygon_center[0] + np.array([0, 0, 2]),
# self.support_polygon_center[0] + np.array([0, 0, -2]), [0, 1, 0], 5,
# 0.1) # TODO rendering to draw support polygon
# p.addUserDebugLine(self.support_polygon_center[0] + np.array([2, 0, 0]),
# self.support_polygon_center[0] + np.array([-2, 0, 0]), [0, 1, 0], 5,
# 0.1) # TODO rendering to draw support polygon
width = 1600
height = 900
base_pos, base_quat = self._p.getBasePositionAndOrientation(self.r)
base_orn = self._p.getEulerFromQuaternion(base_quat)
# yaw = base_orn[2]*180/math.pi
view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=distance,
yaw=yaw,
pitch=pitch,
roll=roll,
upAxisIndex=2,
)
#
# view_matrix = p.computeViewMatrix(
# cameraTargetPosition=base_pos,
# cameraEyePosition=np.array(base_pos)+np.array([3,-3,2]),
# cameraUpVector=np.array(base_pos)+np.array([0,0,1]),
# )
proj_matrix = self._p.computeProjectionMatrixFOV(
fov=60,
aspect=float(width)/height,
nearVal=0.1,
farVal=100.0,
)
# start_time = time.time()
(_, _, px, _, _) = self._p.getCameraImage(
width=width,
height=height,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=self._p.ER_TINY_RENDERER
)
#ER_TINY_RENDERER ER_BULLET_HARDWARE_OPENGL
rgb_array = np.array(px)
rgb_array = rgb_array[:,:,:3]
# print("Time it took to get getCameraImage: %.5fs" % (time.time()-start_time))
return rgb_array
def reset(self):
return self._reset()
def _reset(self, base_pos_nom = None, base_orn_nom = None, fixed_base = False, q_nom = None):
self.action = self.q_nom_list
seed=int((time.time()*1e6)%1e9)
np.random.seed(seed=seed)
self._p.resetSimulation()
self._setupSimulation(base_pos_nom, base_orn_nom, fixed_base, q_nom)
self._envStepCounter = 0
self._observation = self.getExtendedObservation()
#self._reading = self.getReading()
return np.array(self._observation)
def get_observation(self):
return self.getExtendedObservation()
def getExtendedObservation(self):
self._observation = self.getFilteredObservation() # filtered observation
return self._observation
def getReward(self):
return self._reward()
def applyForce(self, force=[0,0,0], linkName="base"):
if linkName == 'base':
index = -1
else:
index = self.jointIdx[linkName]#
frame_flag = self._p.LINK_FRAME
pos = [0,0,0]
# if linkName is 'base':
# pos = self.base_pos
# else:
# link_state = self._p.getLinkState(self.r, self.jointIdx[linkName])
# pos = link_state[0]
self._p.applyExternalForce(self.r, index,
forceObj=force,
posObj=pos ,#[0, 0.0035, 0],
flags=frame_flag)
def _reward(self):
x_pos_err = 0 - self.base_pos[0]
y_pos_err = 0 - self.base_pos[1]
z_pos_err = 1.05868 - self.base_pos[2] #1.128
# print("Self pos: %.2f, %.2f, %.2f " % (x_pos_err, y_pos_err, z_pos_err))
# print("Base pos: ", self.base_pos)
# print("VEL: ", self.base_vel_yaw)
x_vel_err = -self.base_vel_yaw[0]
y_vel_err = -self.base_vel_yaw[1]
z_vel_err = -self.base_vel_yaw[2]
chest_link_state = self._p.getLinkState(self.r, self.jointIdx['torsoRoll'], computeLinkVelocity=1)
torso_pitch_err = 0-chest_link_state[1][1]
pelvis_pitch_err = 0-self.base_orn[1]
torso_roll_err = 0-chest_link_state[1][0]
pelvis_roll_err = 0-self.base_orn[0]
alpha = 1e-3#1e-2#1e-1
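        # Each reward term below is a Gaussian-like kernel exp(ln(alpha) * (err/scale)**2):
        # it equals 1 at zero error and decays to alpha when the error reaches its scale.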
x_pos_reward = math.exp(math.log(alpha)*(x_pos_err/0.7)**2) #1.0
y_pos_reward = math.exp(math.log(alpha)*(y_pos_err/0.7)**2) #1.0
z_pos_reward = math.exp(math.log(alpha)*(z_pos_err/0.7)**2) #1.0
x_vel_reward = math.exp(math.log(alpha)*(x_vel_err/1.0)**2) #4.0
y_vel_reward = math.exp(math.log(alpha)*(y_vel_err/1.0)**2) #4.0
z_vel_reward = math.exp(math.log(alpha)*(z_vel_err/1.0)**2) #2.0
torso_pitch_reward = math.exp(math.log(alpha)*(torso_pitch_err/1.57)**2)
pelvis_pitch_reward = math.exp(math.log(alpha)*(pelvis_pitch_err/1.57)**2)
torso_roll_reward = math.exp(math.log(alpha)*(torso_roll_err/1.57)**2)
pelvis_roll_reward = math.exp(math.log(alpha)*(pelvis_roll_err/1.57)**2)
# reward = (
# 1.0 * x_pos_reward + 0.0 * y_pos_reward + 6.0 * z_pos_reward\
# +1.0 * x_vel_reward + 1.0 * y_vel_reward + 1.0 * z_vel_reward \
# +0.0 * torso_pitch_reward + 1.0 * pelvis_pitch_reward \
# +0.0 * torso_roll_reward + 1.0 * pelvis_roll_reward \
# ) \
# * 1 / (1.0 + 0.0 + 6.0 + 1.0 + 1.0 + 1.0 + 1.0 + 1.0 )
# print(self.total_mass)
force_targ = -self.total_mass*self.g/2.0
left_foot_force_err = force_targ-self.leftContactForce[2] # Z contact force
right_foot_force_err = force_targ-self.rightContactForce[2]
left_foot_force_reward = math.exp(math.log(alpha)*(left_foot_force_err/force_targ)**2)
right_foot_force_reward = math.exp(math.log(alpha)*(right_foot_force_err/force_targ)**2)
if self.regularise_action:
velocity_error = 0
torque_error = 0
for idx, key in enumerate(self.controlled_joints):
velocity_error += (self.joint_velocity[idx] / self.v_max[key])**2
torque_error += (self.joint_torques[idx]/ self.u_max[key])**2
# print("Foot contact: ", foot_contact_term, "vel: %.2f, torque: %.2f, power: %.2f" % (velocity_penalty, torque_penalty, power_penalty))
velocity_error /= len(self.controlled_joints)
torque_error /= len(self.controlled_joints)
            joint_vel_reward = math.exp(math.log(alpha)*velocity_error)
            joint_torque_reward = math.exp(math.log(alpha)*torque_error)
else:
joint_vel_reward = 0
joint_torque_reward = 0
foot_contact_term = 0
fall_term = 0
        if (self.leftFoot_isInContact and self.rightFoot_isInContact): # both feet in contact
            foot_contact_term = 2.0#-5 # 1 TODO increase penalty for losing contact with the ground
        elif (self.leftFoot_isInContact or self.rightFoot_isInContact): # only one foot in contact
            foot_contact_term = 0.5#-5 # 1 TODO increase penalty for losing contact with the ground
# if self.checkFall():
# fall_term -= 10
reward = (
1.0 * x_pos_reward + 1.0 * y_pos_reward + 4.0 * z_pos_reward\
+1.0 * x_vel_reward + 1.0 * y_vel_reward + 1.0 * z_vel_reward \
+0.5 * torso_pitch_reward + 0.5 * pelvis_pitch_reward \
# +0.5 * torso_roll_reward + 0.5 * pelvis_roll_reward \
+1.0 * left_foot_force_reward + 1.0 * right_foot_force_reward \
+1.0 * joint_vel_reward + 1.0 * joint_torque_reward + 2.0* foot_contact_term
) \
* 10 / (2.0 + 4.0 + 2.0 + 1.0 + 1.0 + 1.0 + 1.0 + 2.0 + 2.0)
# penalize reward when joint is moving too fast
# velocity_penalty = 0
# torque_penalty = 0
# power_penalty = 0
# if self.regularise_action:
# for idx, key in enumerate(self.controlled_joints):
# velocity_penalty -= (self.joint_velocity[idx] / self.v_max[key])**2
# torque_penalty -= 0.1 * abs(self.joint_torques[idx]/ self.u_max[key])
# power_penalty -= 0.1 * abs(self.joint_velocity[idx]) * abs(self.joint_torques[idx])
# # print("Foot contact: ", foot_contact_term, "vel: %.2f, torque: %.2f, power: %.2f" % (velocity_penalty, torque_penalty, power_penalty))
# reward += foot_contact_term #+fall_term+velocity_penalty+torque_penalty+power_penalty
reward_term = dict([
("x_pos_reward", x_pos_reward),
("y_pos_reward", y_pos_reward),
("z_pos_reward", z_pos_reward),
("x_vel_reward", x_vel_reward),
("y_vel_reward", y_vel_reward),
("z_vel_reward", z_vel_reward),
("torso_pitch_reward", torso_pitch_reward),
("pelvis_pitch_reward", pelvis_pitch_reward),
("torso_roll_reward", torso_roll_reward),
("pelvis_roll_reward", pelvis_roll_reward),
("left_foot_force_reward", left_foot_force_reward),
("right_foot_force_reward", right_foot_force_reward)
])
# print("Reward: %.4f" % (reward/20))
# print("Reward: ", reward_term)
return reward, reward_term
def resetJointStates(self, base_pos_nom=None, base_orn_nom=None, q_nom=None):
# if base_pos_nom is None:
# base_pos_nom = self.base_pos_nom
# if base_orn_nom is None:
# base_orn_nom = self.base_orn_nom
if q_nom is None:
q_nom = self.q_nom
else:
#replace nominal joint angle with target joint angle
temp=dict(self.q_nom)
for key, value in q_nom.items():
temp[key] = value
q_nom = dict(temp)
self.q_nom = dict(q_nom)
for jointName in q_nom:
self._p.resetJointState(self.r,
self.jointIdx[jointName],
targetValue=q_nom[jointName],
targetVelocity=0)
# print("Reset %s to %.2f" % (jointName, q_nom[jointName]))
# print(base_orn_nom)
# self._p.resetBasePositionAndOrientation(self.r, base_pos_nom, base_orn_nom)
# self._p.resetBaseVelocity(self.r, [0, 0, 0], [0, 0, 0])
def calcCOM(self):
self.linkCOMPos.update({"base": np.array(self.base_pos)}) # base position is the COM of the pelvis
self.com_pos = np.zeros((1, 3))
        for key, value in self.linkMass.items():
            if key != "base":
                info = self._p.getLinkState(self.r, self.jointIdx[key], computeLinkVelocity=0)
                self.linkCOMPos.update({key: info[0]})
                # print("WTF: %.3f" % value, np.array(self.linkCOMPos[key]))
                # print("KEY: %s, value: %.2f" % (key, value))
            # accumulate the mass-weighted COM of every link, including the base,
            # so that dividing by the total mass gives the whole-body COM
            self.com_pos += np.array(self.linkCOMPos[key]) * value
        self.com_pos /= self.total_mass
# self.com_pos /= self.total_mass
# update global COM position
# self.com_pos = np.array(sum)
return self.com_pos
def _setupSimulation(self, base_pos_nom=None, base_orn_nom=None, fixed_base=False, q_nom=None):
if base_pos_nom is None:
base_pos_nom = self.base_pos_nom
if base_orn_nom is None:
base_orn_nom = self.base_orn_nom
self._p.setRealTimeSimulation(0)
self._p.setGravity(0, 0, -self.g) #TODO set gravity
self._p.setTimeStep(self._dt)
self.dir_path = os.path.dirname(os.path.realpath(__file__))
self._p.configureDebugVisualizer(self._p.COV_ENABLE_RENDERING, 0)
plane_urdf = self.dir_path + "/plane/plane.urdf"
self.plane = self._p.loadURDF(plane_urdf, basePosition=[0, 0, 0], baseOrientation=[0,0,0,1], useFixedBase=True)
if self.useFullDOF:
valkyrie_urdf = self.dir_path + "/valkyrie_full_dof.urdf"
else:
valkyrie_urdf = self.dir_path + "/valkyrie_reduced_fixed.urdf"
# print("Fixed: ", self.fixed_base)
self.r = self._p.loadURDF(fileName=valkyrie_urdf,
basePosition=base_pos_nom,
baseOrientation=base_orn_nom,
flags=self._p.URDF_USE_INERTIA_FROM_FILE|self._p.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS,
useFixedBase=self.fixed_base,
)
self._p.configureDebugVisualizer(self._p.COV_ENABLE_RENDERING, 1)
jointIds = []
for j in range(self._p.getNumJoints(self.r)):
self._p.changeDynamics(self.r, j, linearDamping=0, angularDamping=0)
info = self._p.getJointInfo(self.r, j)
#print(info)
jointName = info[1].decode("utf-8")
# print(jointName)
jointType = info[2]
if (jointType == self._p.JOINT_REVOLUTE):
jointIds.append(j)
# print("Appending %s" % jointName)
self.jointIdx.update({jointName: info[0]})
self.jointNameIdx.update({info[0]: jointName})
self.jointIds = []
for name in self.controlled_joints:
_id = self.jointIdx[name]
if _id in jointIds:
self.jointIds.append(_id)
if self.margin > 90*3.14/180:
for joint in self.controlled_joints:
# print("self joint didx", self.jointIdx)
info = self._p.getJointInfo(self.r, self.jointIdx[joint])
self.joint_limits_low.update({joint: (info[8])})
self.joint_limits_high.update({joint: (info[9])})
self.jointLowerLimit = []
self.jointUpperLimit = []
for jointName in self.controlled_joints:
self.jointLowerLimit.append(self.joint_limits_low[jointName])
self.jointUpperLimit.append(self.joint_limits_high[jointName])
self.joint_increment = (np.array(self.jointUpperLimit) - np.array(self.jointLowerLimit))/ (25 * 10)
self.resetJointStates(base_pos_nom, base_orn_nom, q_nom)
def getObservation(self):
self.base_pos, self.base_quat = self._p.getBasePositionAndOrientation(self.r)
self.base_vel, self.base_orn_vel = self._p.getBaseVelocity(self.r)
# print(self.base_pos)
# for idx in self.jointNameIdx.keys():
# print("Val: ", self._p.getLinkState(self.r, idx+1)[0])
queried_links = [
"head_imu_joint",
"leftElbowPitch",
"rightElbowPitch",
"left_hand",
"right_hand",
"torsoYaw",
"leftKneePitch",
"rightKneePitch",
"leftAnklePitch",
"rightAnklePitch",
]
queried_indices = []
for link in queried_links:
queried_indices.append(self.jointIdx[link])
self.linkStates = self._p.getLinkStates(self.r, queried_indices)
self.head_pos = self.linkStates[0][0]
self.left_elbow_pos = self.linkStates[1][0]
self.right_elbow_pos = self.linkStates[2][0]
self.left_hand_pos = self.linkStates[3][0]
self.right_hand_pos = self.linkStates[4][0]
self.torso_pos = self.linkStates[5][0]
self.left_knee_pos = self.linkStates[6][0]
self.right_knee_pos = self.linkStates[7][0]
self.left_foot_pos = self.linkStates[8][0]
self.right_foot_pos = self.linkStates[9][0]
# self.left_foot_pos = self._p.getLinkState(self.r, self.jointIdx["leftAnklePitch"])[0]
# self.right_foot_pos = self._p.getLinkState(self.r, self.jointIdx["rightAnklePitch"])[0]
# ankleRollContact = self._p.getContactPoints(self.r, self.plane, self.jointIdx['leftAnkleRoll'], -1)
# anklePitchContact = self._p.getContactPoints(self.r, self.plane, self.jointIdx['leftAnklePitch'], -1)
# ankleRollContact = self._p.getContactPoints(self.r, self.plane, self.jointIdx['rightAnkleRoll'], -1)
# anklePitchContact = self._p.getContactPoints(self.r, self.plane, self.jointIdx['rightAnklePitch'], -1)
leftContactInfo = self._p.getContactPoints(self.r, self.plane, self.jointIdx["leftAnkleRoll"], -1)
rightContactInfo = self._p.getContactPoints(self.r, self.plane, self.jointIdx["rightAnkleRoll"], -1)
# print(leftContactInfo)
self.leftFoot_isInContact = (len(leftContactInfo) > 0)
self.rightFoot_isInContact = (len(rightContactInfo) > 0)
# print("LEFT: %d, RIGHT: %d" % (self.leftFoot_isInContact, self.rightFoot_isInContact))
self.leftContactForce = [0, 0, 0]
self.rightContactForce = [0, 0, 0]
if self.leftFoot_isInContact:
for info in leftContactInfo:
contactNormal = np.array(info[7]) # contact normal of foot pointing towards plane
contactNormal = -contactNormal # contact normal of plane pointing towards foot
contactNormalForce = np.array(info[9])
F_contact = np.array(contactNormal)*contactNormalForce
self.leftContactForce += F_contact
if self.rightFoot_isInContact:
for info in rightContactInfo:
contactNormal = np.array(info[7]) # contact normal of foot pointing towards plane
contactNormal = -contactNormal # contact normal of plane pointing towards foot
contactNormalForce = np.array(info[9])
F_contact = np.array(contactNormal)*contactNormalForce
self.rightContactForce += F_contact
# print("LEFT CONTACT FORCE: ", self.leftContactForce)
# print("RIGHT CONTACT FORCE: ", self.rightContactForce)
for name in self.controlled_joints:
_id = self.jointIdx[name]
# print("%s: ID: %d" % (self.jointNameIdx[_id], _id))
self.joint_states.update({self.jointNameIdx[_id]: self._p.getJointState(self.r, _id)})
# print("NAME: ", name)
# print(self.joint_states[name])
"""Observation"""
observation = []
"""Yaw adjusted base linear velocity"""
self.base_orn = self._p.getEulerFromQuaternion(self.base_quat)
Rz = rotZ(self.base_orn[2])
self.Rz_i = np.linalg.inv(Rz)
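# Rz_i maps world-frame vectors into the yaw-aligned (heading) frame; the yaw-adjusted base velocity is typically obtained as Rz_i applied to base_vel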
"""Implementations of the IPFP algorithm to solve for equilibrium and do comparative statics
in several variants of the `Choo and Siow 2006 <https://www.jstor.org/stable/10.1086/498585?seq=1>`_ model:
* homoskedastic with singles (as in Choo and Siow 2006)
* homoskedastic without singles
* gender-heteroskedastic: with a scale parameter on the error term for women
* gender- and type-heteroskedastic: with a scale parameter on the error term for each gender and type
* two-level nested logit, with nests and nest parameters that do not depend on the type, and {0} as the first nest
Each solver, when fed the joint surplus and margins, returns the equilibrium matching patterns, the adding-up errors on the margins,
and if requested (using `gr=True`) the derivatives of the matching patterns in all primitives.
"""
import numpy as np
from math import sqrt
from typing import Union, Tuple, List
import scipy.linalg as spla
from utils import print_stars, bs_error_abort, npexp, npmaxabs, \
nppow, der_nppow, nprepeat_col, nprepeat_row, describe_array, test_vector
TripleArrays = Tuple[np.ndarray, np.ndarray, np.ndarray]
IPFPnoGradientResults = Tuple[TripleArrays, np.ndarray, np.ndarray, np.ndarray]
IPFPGradientResults = Tuple[TripleArrays,
np.ndarray, np.ndarray, np.ndarray, TripleArrays]
IPFPResults = Union[IPFPnoGradientResults, IPFPGradientResults]
def _ipfp_check_sizes(men_margins: np.ndarray, women_margins: np.ndarray,
Phi: np.ndarray) -> Tuple[int, int]:
"""checks that the margins and surplus have the correct shapes and sizes """
X = test_vector(men_margins)
Y = test_vector(women_margins)
if Phi.shape != (X, Y):
bs_error_abort(f"The shape of Phi should be ({X}, {Y}")
return X, Y
def ipfp_homoskedastic_nosingles_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
tol: float = 1e-9, gr: bool = False, verbose: bool = False,
maxiter: int = 1000) \
-> IPFPnoGradientResults:
"""Solves for equilibrium in a Choo and Siow market without singles, given systematic surplus and margins
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of :math:`(\\mu_{xy})` wrt `Phi`
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
muxy: the matching patterns, shape (X, Y)
marg_err_x, marg_err_y: the errors on the margins
and the gradients of :math:`(\\mu_{xy})` wrt `Phi` if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
n_couples = np.sum(men_margins)
# check that there are as many men as women
if np.abs(np.sum(women_margins) - n_couples) > n_couples * tol:
bs_error_abort("There should be as many men as women")
ephi2, der_ephi2 = npexp(Phi / 2.0, deriv=True)
ephi2T = ephi2.T
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
bigc = sqrt(n_couples / np.sum(ephi2))
txi = np.full(X, bigc)
tyi = np.full(Y, bigc)
err_diff = bigc
tol_diff = tol * err_diff
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
tx = men_margins / sx
sy = ephi2T @ tx
ty = women_margins / sy
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi, tyi = tx, ty
niter += 1
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = np.sum(muxy, 1) - men_margins
marg_err_y = np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return muxy, marg_err_x, marg_err_y
else:
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = X + Y
n_prod_categories = X * Y
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:X, :X] = np.diag(sxi)
lhs[:X, X:] = ephi2 * txi.reshape((-1, 1))
lhs[X:, X:] = np.diag(syi)
lhs[X:, :X] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt Phi
der_ephi2 /= (2.0 * ephi2) # 1/2 with safeguards
ivar = 0
for iman in range(X):
rhs[iman, ivar:(ivar + Y)] = - \
muxy[iman, :] * der_ephi2[iman, :]
ivar += Y
ivar1 = X
ivar2 = 0
for iwoman in range(Y):
rhs[ivar1, ivar2:n_cols_rhs:Y] = - \
muxy[:, iwoman] * der_ephi2[:, iwoman]
ivar1 += 1
ivar2 += 1
# solve for the derivatives of txi and tyi
dt_dT = spla.solve(lhs, rhs)
dt = dt_dT[:X, :]
dT = dt_dT[X:, :]
# now construct the derivatives of muxy
dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
ivar = 0
for iman in range(X):
dt_man = dt[iman, :]
dmuxy[ivar:(ivar + Y),
:] = np.outer((ephi2[iman, :] * tyi), dt_man)
ivar += Y
for iwoman in range(Y):
dT_woman = dT[iwoman, :]
dmuxy[iwoman:n_prod_categories:Y,
:] += np.outer((ephi2[:, iwoman] * txi), dT_woman)
# add the term that comes from differentiating ephi2
muxy_vec2 = (muxy * der_ephi2).reshape(n_prod_categories)
dmuxy += np.diag(muxy_vec2)
return muxy, marg_err_x, marg_err_y, dmuxy
def ipfp_homoskedastic_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
tol: float = 1e-9, gr: bool = False, verbose: bool = False,
maxiter: int = 1000) -> IPFPResults:
"""Solves for equilibrium in a Choo and Siow market with singles, given systematic surplus and margins
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of the matching patterns
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
(muxy, mux0, mu0y): the matching patterns
marg_err_x, marg_err_y: the errors on the margins
and the gradients of the matching patterns wrt (men_margins, women_margins, Phi) if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
ephi2, der_ephi2 = npexp(Phi / 2.0, deriv=True)
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# where mux0=tx**2 and mu0y=ty**2
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
ephi2T = ephi2.T
nindivs = np.sum(men_margins) + np.sum(women_margins)
bigc = sqrt(nindivs / (X + Y + 2.0 * np.sum(ephi2)))
txi = np.full(X, bigc)
tyi = np.full(Y, bigc)
err_diff = bigc
tol_diff = tol * bigc
niter = 0
while (err_diff > tol_diff) and (niter < maxiter):
sx = ephi2 @ tyi
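# with mux0 = tx**2, the margin equation tx**2 + sx*tx = men_margins has the positive root used below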
tx = (np.sqrt(sx * sx + 4.0 * men_margins) - sx) / 2.0
sy = ephi2T @ tx
ty = (np.sqrt(sy * sy + 4.0 * women_margins) - sy) / 2.0
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi = tx
tyi = ty
niter += 1
mux0 = txi * txi
mu0y = tyi * tyi
muxy = ephi2 * np.outer(txi, tyi)
marg_err_x = mux0 + np.sum(muxy, 1) - men_margins
marg_err_y = mu0y + np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return (muxy, mux0, mu0y), marg_err_x, marg_err_y
else: # we compute the derivatives
sxi = ephi2 @ tyi
syi = ephi2T @ txi
n_sum_categories = X + Y
n_prod_categories = X * Y
# start with the LHS of the linear system
lhs = np.zeros((n_sum_categories, n_sum_categories))
lhs[:X, :X] = np.diag(2.0 * txi + sxi)
lhs[:X, X:] = ephi2 * txi.reshape((-1, 1))
lhs[X:, X:] = np.diag(2.0 * tyi + syi)
lhs[X:, :X] = ephi2T * tyi.reshape((-1, 1))
# now fill the RHS
n_cols_rhs = n_sum_categories + n_prod_categories
rhs = np.zeros((n_sum_categories, n_cols_rhs))
# to compute derivatives of (txi, tyi) wrt men_margins
rhs[:X, :X] = np.eye(X)
# to compute derivatives of (txi, tyi) wrt women_margins
rhs[X:n_sum_categories,
X:n_sum_categories] = np.eye(Y)
# to compute derivatives of (txi, tyi) wrt Phi
der_ephi2 /= (2.0 * ephi2) # 1/2 with safeguards
ivar = n_sum_categories
for iman in range(X):
rhs[iman, ivar:(ivar + Y)] = - \
muxy[iman, :] * der_ephi2[iman, :]
ivar += Y
ivar1 = X
ivar2 = n_sum_categories
for iwoman in range(Y):
rhs[ivar1, ivar2:n_cols_rhs:Y] = - \
muxy[:, iwoman] * der_ephi2[:, iwoman]
ivar1 += 1
ivar2 += 1
# solve for the derivatives of txi and tyi
dt_dT = spla.solve(lhs, rhs)
dt = dt_dT[:X, :]
dT = dt_dT[X:, :]
# now construct the derivatives of the mus
dmux0 = 2.0 * (dt * txi.reshape((-1, 1)))
dmu0y = 2.0 * (dT * tyi.reshape((-1, 1)))
dmuxy = np.zeros((n_prod_categories, n_cols_rhs))
ivar = 0
for iman in range(X):
dt_man = dt[iman, :]
dmuxy[ivar:(ivar + Y),
:] = np.outer((ephi2[iman, :] * tyi), dt_man)
ivar += Y
for iwoman in range(Y):
dT_woman = dT[iwoman, :]
dmuxy[iwoman:n_prod_categories:Y,
:] += np.outer((ephi2[:, iwoman] * txi), dT_woman)
# add the term that comes from differentiating ephi2
muxy_vec2 = (muxy * der_ephi2).reshape(n_prod_categories)
dmuxy[:, n_sum_categories:] += np.diag(muxy_vec2)
return (muxy, mux0, mu0y), marg_err_x, marg_err_y, (dmuxy, dmux0, dmu0y)
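# Minimal usage sketch for the homoskedastic solver (illustrative numbers only, not from any dataset):
#   Phi = np.zeros((2, 2))                          # no systematic surplus
#   men_margins = np.array([10.0, 10.0])
#   women_margins = np.array([10.0, 10.0])
#   (muxy, mux0, mu0y), err_x, err_y = ipfp_homoskedastic_solver(Phi, men_margins, women_margins)
#   # muxy plus the singles mux0 / mu0y add up to the margins, up to the reported errors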
def ipfp_gender_heteroskedastic_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
tau: float, tol: float = 1e-9, gr: bool = False, verbose: bool = False,
maxiter: int = 1000) -> IPFPResults:
"""Solves for equilibrium in a in a gender-heteroskedastic Choo and Siow market
given systematic surplus and margins and a scale parameter `tau`
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
tau: the standard error for all women
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of the matching patterns
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
(muxy, mux0, mu0y): the matching patterns
marg_err_x, marg_err_y: the errors on the margins
and the gradients of the matching patterns wrt (men_margins, women_margins, Phi, tau) if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
if tau <= 0:
bs_error_abort(f"We need a positive tau, not {tau}")
#############################################################################
# we use ipfp_heteroxy_solver with sigma_x = 1 and tau_y = tau
#############################################################################
sigma_x = np.ones(X)
tau_y = np.full(Y, tau)
if gr:
mus, marg_err_x, marg_err_y, dmus_hxy = \
ipfp_heteroskedastic_solver(Phi, men_margins, women_margins,
sigma_x, tau_y, tol=tol, gr=True,
maxiter=maxiter, verbose=verbose)
dmus_xy, dmus_x0, dmus_0y = dmus_hxy
n_sum_categories = X + Y
n_prod_categories = X * Y
n_cols = n_sum_categories + n_prod_categories
itau_y = n_cols + X
dmuxy = np.zeros((n_prod_categories, n_cols + 1))
dmuxy[:, :n_cols] = dmus_xy[:, :n_cols]
dmuxy[:, -1] = np.sum(dmus_xy[:, itau_y:], 1)
dmux0 = np.zeros((X, n_cols + 1))
dmux0[:, :n_cols] = dmus_x0[:, :n_cols]
dmux0[:, -1] = np.sum(dmus_x0[:, itau_y:], 1)
dmu0y = np.zeros((Y, n_cols + 1))
dmu0y[:, :n_cols] = dmus_0y[:, :n_cols]
dmu0y[:, -1] = np.sum(dmus_0y[:, itau_y:], 1)
return mus, marg_err_x, marg_err_y, (dmuxy, dmux0, dmu0y)
else:
return ipfp_heteroskedastic_solver(Phi, men_margins, women_margins,
sigma_x, tau_y, tol=tol, gr=False,
maxiter=maxiter, verbose=verbose)
def ipfp_heteroskedastic_solver(Phi: np.array, men_margins: np.array, women_margins: np.array,
sigma_x: np.array, tau_y: np.array, tol: float = 1e-9, gr: bool = False,
verbose: bool = False,
maxiter: int = 1000) -> IPFPResults:
"""Solves for equilibrium in a in a fully heteroskedastic Choo and Siow market
given systematic surplus and margins and standard errors `sigma_x` and `tau_y`
Args:
Phi: matrix of systematic surplus, shape (X, Y)
men_margins: vector of men margins, shape (X)
women_margins: vector of women margins, shape (Y)
sigma_x: the vector of standard errors for the X types of men
tau_y: the vector of standard errors for the Y types of women
tol: tolerance on change in solution
gr: if `True`, also evaluate derivatives of the matching patterns
verbose: if `True`, prints information
maxiter: maximum number of iterations
Returns:
(muxy, mux0, mu0y): the matching patterns
marg_err_x, marg_err_y: the errors on the margins
and the gradients of the matching patterns wrt (men_margins, women_margins, Phi, sigma_x, tau_y)
if `gr` is `True`
"""
X, Y = _ipfp_check_sizes(men_margins, women_margins, Phi)
if np.min(sigma_x) <= 0.0:
bs_error_abort("All elements of sigma_x must be positive")
if np.min(tau_y) <= 0.0:
bs_error_abort("All elements of tau_y must be positive")
sumxy1 = 1.0 / np.add.outer(sigma_x, tau_y)
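# sumxy1[x, y] = 1 / (sigma_x[x] + tau_y[y]); it scales the joint surplus in the exponent below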
ephi2, der_ephi2 = npexp(Phi * sumxy1, deriv=True)
#############################################################################
# we solve the equilibrium equations muxy = ephi2 * tx * ty
# with tx = mux0^(sigma_x/(sigma_x + tau_max))
# and ty = mu0y^(tau_y/(sigma_max + tau_y))
# starting with a reasonable initial point for tx and ty: tx = ty = bigc
# it is important that it fit the number of individuals
#############################################################################
nindivs = np.sum(men_margins) + np.sum(women_margins)
bigc = nindivs / (X + Y + 2.0 * np.sum(ephi2))
# we find the largest values of sigma_x and tau_y
xmax = np.argmax(sigma_x)
sigma_max = sigma_x[xmax]
ymax = np.argmax(tau_y)
tau_max = tau_y[ymax]
# we use tx = mux0^(sigma_x/(sigma_x + tau_max))
# and ty = mu0y^(tau_y/(sigma_max + tau_y))
sig_taumax = sigma_x + tau_max
txi = np.power(bigc, sigma_x / sig_taumax)
sigmax_tau = tau_y + sigma_max
tyi = np.power(bigc, tau_y / sigmax_tau)
err_diff = bigc
tol_diff = tol * bigc
tol_newton = tol
niter = 0
while (err_diff > tol_diff) and (niter < maxiter): # IPFP main loop
# Newton iterates for men
err_newton = bigc
txin = txi.copy()
mu0y_in = np.power(np.power(tyi, sigmax_tau), 1.0 / tau_y)
while err_newton > tol_newton:
txit = np.power(txin, sig_taumax)
mux0_in = np.power(txit, 1.0 / sigma_x)
out_xy = np.outer(np.power(mux0_in, sigma_x),
np.power(mu0y_in, tau_y))
muxy_in = ephi2 * np.power(out_xy, sumxy1)
errxi = mux0_in + np.sum(muxy_in, 1) - men_margins
err_newton = npmaxabs(errxi)
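# Newton update on txin: margin error divided by its derivative with respect to txin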
txin -= errxi / (sig_taumax * (mux0_in / sigma_x
+ np.sum(sumxy1 * muxy_in, 1)) / txin)
tx = txin
# Newton iterates for women
err_newton = bigc
tyin = tyi.copy()
mux0_in = np.power(np.power(tx, sig_taumax), 1.0 / sigma_x)
while err_newton > tol_newton:
tyit = np.power(tyin, sigmax_tau)
mu0y_in = np.power(tyit, 1.0 / tau_y)
out_xy = np.outer(np.power(mux0_in, sigma_x),
np.power(mu0y_in, tau_y))
muxy_in = ephi2 * np.power(out_xy, sumxy1)
erryi = mu0y_in + np.sum(muxy_in, 0) - women_margins
err_newton = npmaxabs(erryi)
tyin -= erryi / (sigmax_tau * (mu0y_in / tau_y
+ np.sum(sumxy1 * muxy_in, 0)) / tyin)
ty = tyin
err_x = npmaxabs(tx - txi)
err_y = npmaxabs(ty - tyi)
err_diff = err_x + err_y
txi = tx
tyi = ty
niter += 1
mux0 = mux0_in
mu0y = mu0y_in
muxy = muxy_in
marg_err_x = mux0 + np.sum(muxy, 1) - men_margins
marg_err_y = mu0y + np.sum(muxy, 0) - women_margins
if verbose:
print(f"After {niter} iterations:")
print(f"\tMargin error on x: {npmaxabs(marg_err_x)}")
print(f"\tMargin error on y: {npmaxabs(marg_err_y)}")
if not gr:
return (muxy, mux0, mu0y), marg_err_x, marg_err_y
else: # we compute the derivatives
n_sum_categories = X + Y
n_prod_categories = X * Y
# we work directly with (mux0, mu0y)
sigrat_xy = sumxy1 * sigma_x.reshape((-1, 1))
taurat_xy = 1.0 - sigrat_xy
mux0_mat = nprepeat_col(mux0, Y)
mu0y_mat = nprepeat_row(mu0y, X)
# muxy = axy * bxy * ephi2
axy = nppow(mux0_mat, sigrat_xy)
bxy = nppow(mu0y_mat, taurat_xy)
der_axy1, der_axy2 = der_nppow(mux0_mat, sigrat_xy)
der_bxy1, der_bxy2 = der_nppow(mu0y_mat, taurat_xy)
der_axy1_rat, der_axy2_rat = der_axy1 / axy, der_axy2 / axy
der_bxy1_rat, der_bxy2_rat = der_bxy1 / bxy, der_bxy2 / bxy
# start with the LHS of the linear system on (dmux0, dmu0y)
lhs = np.zeros((n_sum_categories, n_sum_categories))
"""Models and utilities for processing SMIRNOFF data."""
import abc
import copy
import functools
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from openff.toolkit.topology import Molecule
from openff.toolkit.typing.engines.smirnoff.parameters import (
AngleHandler,
BondHandler,
ChargeIncrementModelHandler,
ConstraintHandler,
ElectrostaticsHandler,
ImproperTorsionHandler,
LibraryChargeHandler,
ParameterHandler,
ProperTorsionHandler,
ToolkitAM1BCCHandler,
UnassignedProperTorsionParameterException,
UnassignedValenceParameterException,
VirtualSiteHandler,
vdWHandler,
)
from openff.units import unit
from openff.units.openmm import from_openmm
from openmm import unit as omm_unit
from pydantic import Field
from typing_extensions import Literal
from openff.interchange.components.potentials import (
Potential,
PotentialHandler,
WrappedPotential,
)
from openff.interchange.exceptions import (
InvalidParameterHandlerError,
MissingParametersError,
SMIRNOFFParameterAttributeNotImplementedError,
)
from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey
from openff.interchange.types import FloatQuantity
kcal_mol = omm_unit.kilocalorie_per_mole
kcal_mol_angstroms = kcal_mol / omm_unit.angstrom ** 2
kcal_mol_radians = kcal_mol / omm_unit.radian ** 2
if TYPE_CHECKING:
from openff.toolkit.topology import Topology
from openff.interchange.components.mdtraj import _OFFBioTop
ElectrostaticsHandlerType = Union[
ElectrostaticsHandler,
ChargeIncrementModelHandler,
LibraryChargeHandler,
ToolkitAM1BCCHandler,
]
T = TypeVar("T", bound="SMIRNOFFPotentialHandler")
TP = TypeVar("TP", bound="PotentialHandler")
class SMIRNOFFPotentialHandler(PotentialHandler, abc.ABC):
"""Base class for handlers storing potentials produced by SMIRNOFF force fields."""
@classmethod
@abc.abstractmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
raise NotImplementedError()
@classmethod
@abc.abstractmethod
def supported_parameters(cls):
"""Return a list of parameter attributes supported by this handler."""
raise NotImplementedError()
# @classmethod
# @abc.abstractmethod
# def valence_terms(cls, topology):
# """Return an interable of all of one type of valence term in this topology."""
# raise NotImplementedError()
@classmethod
def check_supported_parameters(cls, parameter_handler: ParameterHandler):
"""Verify that a parameter handler is in an allowed list of handlers."""
for parameter in parameter_handler.parameters:
for parameter_attribute in parameter._get_defined_parameter_attributes():
if parameter_attribute not in cls.supported_parameters():
raise SMIRNOFFParameterAttributeNotImplementedError(
parameter_attribute,
)
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey]."""
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
# TODO: Should the slot_map always be reset, or should we be able to partially
# update it? Also Note the duplicated code in the child classes
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
topology_key = TopologyKey(atom_indices=key)
potential_key = PotentialKey(
id=val.parameter_type.smirks, associated_handler=parameter_handler_name
)
self.slot_map[topology_key] = potential_key
if self.__class__.__name__ in ["SMIRNOFFBondHandler", "SMIRNOFFAngleHandler"]:
valence_terms = self.valence_terms(topology) # type: ignore[attr-defined]
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=valence_terms,
exception_cls=UnassignedValenceParameterException,
)
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: TP,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFPotentialHandler from toolkit data.
"""
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(type(parameter_handler))
handler = cls()
if hasattr(handler, "fractional_bond_order_method"):
if getattr(parameter_handler, "fractional_bondorder_method", None):
handler.fractional_bond_order_method = ( # type: ignore[attr-defined]
parameter_handler.fractional_bondorder_method # type: ignore[attr-defined]
)
handler.fractional_bond_order_interpolation = ( # type: ignore[attr-defined]
parameter_handler.fractional_bondorder_interpolation # type: ignore[attr-defined]
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
class SMIRNOFFBondHandler(SMIRNOFFPotentialHandler):
"""Handler storing bond potentials as produced by a SMIRNOFF force field."""
type: Literal["Bonds"] = "Bonds"
expression: Literal["k/2*(r-length)**2"] = "k/2*(r-length)**2"
fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
fractional_bond_order_interpolation: Literal["linear"] = "linear"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [BondHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "length", "k_bondorder", "length_bondorder"]
@classmethod
def valence_terms(cls, topology):
"""Return all bonds in this topology."""
return [list(b.atoms) for b in topology.topology_bonds]
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
# TODO: Should the slot_map always be reset, or should we be able to partially
# update it? Also Note the duplicated code in the child classes
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
param = val.parameter_type
if param.k_bondorder or param.length_bondorder:
top_bond = topology.get_bond_between(*key)
fractional_bond_order = top_bond.bond.fractional_bond_order
if not fractional_bond_order:
raise RuntimeError(
"Bond orders should already be assigned at this point"
)
else:
fractional_bond_order = None
topology_key = TopologyKey(
atom_indices=key, bond_order=fractional_bond_order
)
potential_key = PotentialKey(
id=val.parameter_type.smirks,
associated_handler=parameter_handler_name,
bond_order=fractional_bond_order,
)
self.slot_map[topology_key] = potential_key
valence_terms = self.valence_terms(topology)
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=valence_terms,
exception_cls=UnassignedValenceParameterException,
)
def store_potentials(self, parameter_handler: "BondHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
if self.potentials:
self.potentials = dict()
for topology_key, potential_key in self.slot_map.items():
smirks = potential_key.id
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
if topology_key.bond_order: # type: ignore[union-attr]
bond_order = topology_key.bond_order # type: ignore[union-attr]
if parameter.k_bondorder:
data = parameter.k_bondorder
else:
data = parameter.length_bondorder
coeffs = _get_interpolation_coeffs(
fractional_bond_order=bond_order,
data=data,
)
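# each integer bond order key in `data` gets a weight; the WrappedPotential below mixes the per-order potentials with these weights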
pots = []
map_keys = [*data.keys()]
for map_key in map_keys:
pots.append(
Potential(
parameters={
"k": parameter.k_bondorder[map_key],
"length": parameter.length_bondorder[map_key],
},
map_key=map_key,
)
)
potential = WrappedPotential(
{pot: coeff for pot, coeff in zip(pots, coeffs)}
)
else:
potential = Potential( # type: ignore[assignment]
parameters={
"k": parameter.k,
"length": parameter.length,
},
)
self.potentials[potential_key] = potential
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: "BondHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFBondHandler from toolkit data.
"""
# TODO: This method overrides SMIRNOFFPotentialHandler.from_toolkit in order to gobble up
# a ConstraintHandler. This seems like a good solution for the interdependence, but is also
# not a great practice. A better solution would involve not overriding the method with a
# different function signature.
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError
handler: T = cls(type="Bonds", expression="k/2*(r-length)**2")
if (
any(
getattr(p, "k_bondorder", None) is not None
for p in parameter_handler.parameters
)
) or (
any(
getattr(p, "length_bondorder", None) is not None
for p in parameter_handler.parameters
)
):
for ref_mol in topology.reference_molecules:
# TODO: expose conformer generation and fractional bond order assignment
# knobs to user via API
ref_mol.generate_conformers(n_conformers=1)
ref_mol.assign_fractional_bond_orders(
bond_order_model=handler.fractional_bond_order_method.lower(), # type: ignore[attr-defined]
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
class SMIRNOFFConstraintHandler(SMIRNOFFPotentialHandler):
"""Handler storing constraint potentials as produced by a SMIRNOFF force field."""
type: Literal["Constraints"] = "Constraints"
expression: Literal[""] = ""
constraints: Dict[
PotentialKey, bool
] = dict() # should this be named potentials for consistency?
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [BondHandler, ConstraintHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "length", "distance"]
@classmethod
def _from_toolkit( # type: ignore[override]
cls: Type[T],
parameter_handler: List,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFPotentialHandler from toolkit data.
"""
if isinstance(parameter_handler, list):
parameter_handlers = parameter_handler
else:
parameter_handlers = [parameter_handler]
for parameter_handler in parameter_handlers:
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(type(parameter_handler))
handler = cls()
handler.store_constraints( # type: ignore[attr-defined]
parameter_handlers=parameter_handlers, topology=topology
)
return handler
def store_constraints(
self,
parameter_handlers: Any,
topology: "_OFFBioTop",
) -> None:
"""Store constraints."""
if self.slot_map:
self.slot_map = dict()
constraint_handler = [
p for p in parameter_handlers if type(p) == ConstraintHandler
][0]
constraint_matches = constraint_handler.find_matches(topology)
if any([type(p) == BondHandler for p in parameter_handlers]):
bond_handler = [p for p in parameter_handlers if type(p) == BondHandler][0]
bonds = SMIRNOFFBondHandler._from_toolkit(
parameter_handler=bond_handler,
topology=topology,
)
else:
bond_handler = None
bonds = None
for key, match in constraint_matches.items():
topology_key = TopologyKey(atom_indices=key)
smirks = match.parameter_type.smirks
distance = match.parameter_type.distance
if distance is not None:
# This constraint parameter is fully specified
potential_key = PotentialKey(
id=smirks, associated_handler="Constraints"
)
distance = match.parameter_type.distance
else:
# This constraint parameter depends on the BondHandler ...
if bond_handler is None:
raise MissingParametersError(
f"Constraint with SMIRKS pattern {smirks} found with no distance "
"specified, and no corresponding bond parameters were found. The distance "
"of this constraint is not specified."
)
# ... so use the same PotentialKey instance as the BondHandler to look up the distance
potential_key = bonds.slot_map[topology_key] # type: ignore[union-attr]
self.slot_map[topology_key] = potential_key
distance = bonds.potentials[potential_key].parameters["length"] # type: ignore[union-attr]
potential = Potential(
parameters={
"distance": distance,
}
)
self.constraints[potential_key] = potential # type: ignore[assignment]
class SMIRNOFFAngleHandler(SMIRNOFFPotentialHandler):
"""Handler storing angle potentials as produced by a SMIRNOFF force field."""
type: Literal["Angles"] = "Angles"
expression: Literal["k/2*(theta-angle)**2"] = "k/2*(theta-angle)**2"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [AngleHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attributes."""
return ["smirks", "id", "k", "angle"]
@classmethod
def valence_terms(cls, topology):
"""Return all angles in this topology."""
return list(topology.angles)
def store_potentials(self, parameter_handler: "AngleHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
for potential_key in self.slot_map.values():
smirks = potential_key.id
# ParameterHandler.get_parameter returns a list, although this
# should only ever be length 1
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
potential = Potential(
parameters={
"k": parameter.k,
"angle": parameter.angle,
},
)
self.potentials[potential_key] = potential
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: "AngleHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFAngleHandler from toolkit data.
"""
handler = cls()
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
class SMIRNOFFProperTorsionHandler(SMIRNOFFPotentialHandler):
"""Handler storing proper torsions potentials as produced by a SMIRNOFF force field."""
type: Literal["ProperTorsions"] = "ProperTorsions"
expression: Literal[
"k*(1+cos(periodicity*theta-phase))"
] = "k*(1+cos(periodicity*theta-phase))"
fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
fractional_bond_order_interpolation: Literal["linear"] = "linear"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [ProperTorsionHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "periodicity", "phase", "idivf", "k_bondorder"]
def store_matches(
self,
parameter_handler: "ProperTorsionHandler",
topology: "_OFFBioTop",
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
if self.slot_map:
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
param = val.parameter_type
n_terms = len(val.parameter_type.phase)
for n in range(n_terms):
smirks = param.smirks
if param.k_bondorder:
# The relevant bond order is that of the _central_ bond in the torsion
top_bond = topology.get_bond_between(key[1], key[2])
fractional_bond_order = top_bond.bond.fractional_bond_order
if not fractional_bond_order:
raise RuntimeError(
"Bond orders should already be assigned at this point"
)
else:
fractional_bond_order = None
topology_key = TopologyKey(
atom_indices=key, mult=n, bond_order=fractional_bond_order
)
potential_key = PotentialKey(
id=smirks,
mult=n,
associated_handler="ProperTorsions",
bond_order=fractional_bond_order,
)
self.slot_map[topology_key] = potential_key
parameter_handler._check_all_valence_terms_assigned(
assigned_terms=matches,
valence_terms=list(topology.propers),
exception_cls=UnassignedProperTorsionParameterException,
)
def store_potentials(self, parameter_handler: "ProperTorsionHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
for topology_key, potential_key in self.slot_map.items():
smirks = potential_key.id
n = potential_key.mult
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
# n_terms = len(parameter.k)
if topology_key.bond_order: # type: ignore[union-attr]
bond_order = topology_key.bond_order # type: ignore[union-attr]
data = parameter.k_bondorder[n]
coeffs = _get_interpolation_coeffs(
fractional_bond_order=bond_order,
data=data,
)
pots = []
map_keys = [*data.keys()]
for map_key in map_keys:
parameters = {
"k": parameter.k_bondorder[n][map_key],
"periodicity": parameter.periodicity[n] * unit.dimensionless,
"phase": parameter.phase[n],
"idivf": parameter.idivf[n] * unit.dimensionless,
}
pots.append(
Potential(
parameters=parameters,
map_key=map_key,
)
)
potential = WrappedPotential(
{pot: coeff for pot, coeff in zip(pots, coeffs)}
)
else:
parameters = {
"k": parameter.k[n],
"periodicity": parameter.periodicity[n] * unit.dimensionless,
"phase": parameter.phase[n],
"idivf": parameter.idivf[n] * unit.dimensionless,
}
potential = Potential(parameters=parameters) # type: ignore[assignment]
self.potentials[potential_key] = potential
class SMIRNOFFImproperTorsionHandler(SMIRNOFFPotentialHandler):
"""Handler storing improper torsions potentials as produced by a SMIRNOFF force field."""
type: Literal["ImproperTorsions"] = "ImproperTorsions"
expression: Literal[
"k*(1+cos(periodicity*theta-phase))"
] = "k*(1+cos(periodicity*theta-phase))"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [ImproperTorsionHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
return ["smirks", "id", "k", "periodicity", "phase", "idivf"]
def store_matches(
self, parameter_handler: "ImproperTorsionHandler", topology: "_OFFBioTop"
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
if self.slot_map:
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val in matches.items():
parameter_handler._assert_correct_connectivity(
val,
[
(0, 1),
(1, 2),
(1, 3),
],
)
n_terms = len(val.parameter_type.k)
for n in range(n_terms):
smirks = val.parameter_type.smirks
non_central_indices = [key[0], key[2], key[3]]
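# the three cyclic permutations below implement the SMIRNOFF trefoil convention; the energy is divided by idivf = 3 in store_potentials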
for permuted_key in [
(
non_central_indices[i],
non_central_indices[j],
non_central_indices[k],
)
for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]
]:
topology_key = TopologyKey(
atom_indices=(key[1], *permuted_key), mult=n
)
potential_key = PotentialKey(
id=smirks, mult=n, associated_handler="ImproperTorsions"
)
self.slot_map[topology_key] = potential_key
def store_potentials(self, parameter_handler: "ImproperTorsionHandler") -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
for potential_key in self.slot_map.values():
smirks = potential_key.id
n = potential_key.mult
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
parameters = {
"k": parameter.k[n],
"periodicity": parameter.periodicity[n] * unit.dimensionless,
"phase": parameter.phase[n],
"idivf": 3.0 * unit.dimensionless,
}
potential = Potential(parameters=parameters)
self.potentials[potential_key] = potential
class _SMIRNOFFNonbondedHandler(SMIRNOFFPotentialHandler, abc.ABC):
"""Base class for handlers storing non-bonded potentials produced by SMIRNOFF force fields."""
type: Literal["nonbonded"] = "nonbonded"
cutoff: FloatQuantity["angstrom"] = Field( # type: ignore
9.0 * unit.angstrom,
description="The distance at which pairwise interactions are truncated",
)
scale_13: float = Field(
0.0, description="The scaling factor applied to 1-3 interactions"
)
scale_14: float = Field(
0.5, description="The scaling factor applied to 1-4 interactions"
)
scale_15: float = Field(
1.0, description="The scaling factor applied to 1-5 interactions"
)
class SMIRNOFFvdWHandler(_SMIRNOFFNonbondedHandler):
"""Handler storing vdW potentials as produced by a SMIRNOFF force field."""
type: Literal["vdW"] = "vdW" # type: ignore[assignment]
expression: Literal[
"4*epsilon*((sigma/r)**12-(sigma/r)**6)"
] = "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
method: Literal["cutoff", "pme", "no-cutoff"] = Field("cutoff")
mixing_rule: Literal["lorentz-berthelot", "geometric"] = Field(
"lorentz-berthelot",
description="The mixing rule (combination rule) used in computing pairwise vdW interactions",
)
switch_width: FloatQuantity["angstrom"] = Field( # type: ignore
1.0 * unit.angstrom,
description="The width over which the switching function is applied",
)
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [vdWHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attributes."""
return ["smirks", "id", "sigma", "epsilon", "rmin_half"]
def store_potentials(self, parameter_handler: vdWHandler) -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
self.method = parameter_handler.method.lower()
self.cutoff = parameter_handler.cutoff
for potential_key in self.slot_map.values():
smirks = potential_key.id
parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
try:
potential = Potential(
parameters={
"sigma": parameter.sigma,
"epsilon": parameter.epsilon,
},
)
except AttributeError:
# Handle rmin_half pending https://github.com/openforcefield/openff-toolkit/pull/750
potential = Potential(
parameters={
"sigma": parameter.sigma,
"epsilon": parameter.epsilon,
},
)
self.potentials[potential_key] = potential
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: "vdWHandler",
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFvdWHandler from toolkit data.
"""
if isinstance(parameter_handler, list):
parameter_handlers = parameter_handler
else:
parameter_handlers = [parameter_handler]
for parameter_handler in parameter_handlers:
if type(parameter_handler) not in cls.allowed_parameter_handlers():
raise InvalidParameterHandlerError(
f"Found parameter handler type {type(parameter_handler)}, which is not "
f"supported by potential type {type(cls)}"
)
handler = cls(
scale_13=parameter_handler.scale13,
scale_14=parameter_handler.scale14,
scale_15=parameter_handler.scale15,
cutoff=parameter_handler.cutoff,
mixing_rule=parameter_handler.combining_rules.lower(),
method=parameter_handler.method.lower(),
switch_width=parameter_handler.switch_width,
)
handler.store_matches(parameter_handler=parameter_handler, topology=topology)
handler.store_potentials(parameter_handler=parameter_handler)
return handler
@classmethod
def parameter_handler_precedence(cls) -> List[str]:
"""
Return the order in which parameter handlers take precedence when computing charges.
"""
return ["vdw", "VirtualSites"]
def _from_toolkit_virtual_sites(
self,
parameter_handler: "VirtualSiteHandler",
topology: "Topology",
):
# TODO: Merge this logic into _from_toolkit
if not all(
isinstance(
p,
(
VirtualSiteHandler.VirtualSiteBondChargeType,
VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
VirtualSiteHandler.VirtualSiteDivalentLonePairType,
VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
),
)
for p in parameter_handler.parameters
):
raise NotImplementedError("Found unsupported virtual site types")
matches = parameter_handler.find_matches(topology)
for atoms, parameter_match in matches.items():
virtual_site_type = parameter_match[0].parameter_type
top_key = VirtualSiteKey(
atom_indices=atoms,
type=virtual_site_type.type,
match=virtual_site_type.match,
)
pot_key = PotentialKey(
id=virtual_site_type.smirks, associated_handler=virtual_site_type.type
)
pot = Potential(
parameters={
"sigma": virtual_site_type.sigma,
"epsilon": virtual_site_type.epsilon,
# "distance": virtual_site_type.distance,
}
)
# if virtual_site_type.type in {"MonovalentLonePair", "DivalentLonePair"}:
# pot.parameters.update(
# {
# "outOfPlaneAngle": virtual_site_type.outOfPlaneAngle,
# }
# )
# if virtual_site_type.type in {"MonovalentLonePair"}:
# pot.parameters.update(
# {
# "inPlaneAngle": virtual_site_type.inPlaneAngle,
# }
# )
self.slot_map.update({top_key: pot_key})
self.potentials.update({pot_key: pot})
class SMIRNOFFElectrostaticsHandler(_SMIRNOFFNonbondedHandler):
"""
A handler which stores any electrostatic parameters applied to a topology.
This handler is responsible for grouping together
* global settings for the electrostatic interactions such as the cutoff distance
and the intramolecular scale factors.
* partial charges which have been assigned by a ``ToolkitAM1BCC``,
``LibraryCharges``, or a ``ChargeIncrementModel`` parameter
handler.
* charge corrections applied by a ``SMIRNOFFChargeIncrementHandler``.
rather than having each in their own handler.
"""
type: Literal["Electrostatics"] = "Electrostatics" # type: ignore[assignment]
expression: Literal["coul"] = "coul"
method: Literal["pme", "cutoff", "reaction-field", "no-cutoff"] = Field("pme")
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [
LibraryChargeHandler,
ChargeIncrementModelHandler,
ToolkitAM1BCCHandler,
ElectrostaticsHandler,
]
@classmethod
def supported_parameters(cls):
"""Return a list of supported parameter attribute names."""
pass
@property
def charges(self) -> Dict[Union[TopologyKey, VirtualSiteKey], unit.Quantity]:
"""Get the total partial charge on each atom, excluding virtual sites."""
return self.get_charges(include_virtual_sites=False)
@property
def charges_with_virtual_sites(
self,
) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
"""Get the total partial charge on each atom, including virtual sites."""
return self.get_charges(include_virtual_sites=True)
def get_charges(
self, include_virtual_sites=False
) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
"""Get the total partial charge on each atom or particle."""
charges: DefaultDict[
Union[TopologyKey, VirtualSiteKey], FloatQuantity
] = defaultdict(lambda: 0.0 * unit.e)
for topology_key, potential_key in self.slot_map.items():
potential = self.potentials[potential_key]
for parameter_key, parameter_value in potential.parameters.items():
if parameter_key == "charge_increments":
if type(topology_key) != VirtualSiteKey:
raise RuntimeError
charge = -1.0 * np.sum(parameter_value)
# assumes virtual sites can only have charges determined in one step
# also, topology_key is actually a VirtualSiteKey
charges[topology_key] = charge
elif parameter_key in ["charge", "charge_increment"]:
charge = parameter_value
charges[topology_key.atom_indices[0]] += charge # type: ignore
else:
raise NotImplementedError()
returned_charges: Dict[
Union[VirtualSiteKey, TopologyKey], unit.Quantity
] = dict()
for index, charge in charges.items():
if isinstance(index, int):
returned_charges[TopologyKey(atom_indices=(index,))] = charge
else:
if include_virtual_sites:
returned_charges[index] = charge
return returned_charges
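# Example (hypothetical atom index): get_charges()[TopologyKey(atom_indices=(0,))] is the total partial charge on atom 0 as a unit.Quantity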
@classmethod
def parameter_handler_precedence(cls) -> List[str]:
"""
Return the order in which parameter handlers take precedence when computing charges.
"""
return ["LibraryCharges", "ChargeIncrementModel", "ToolkitAM1BCC"]
@classmethod
def _from_toolkit(
cls: Type[T],
parameter_handler: Any,
topology: "Topology",
) -> T:
"""
Create a SMIRNOFFElectrostaticsHandler from toolkit data.
"""
if isinstance(parameter_handler, list):
parameter_handlers = parameter_handler
else:
parameter_handlers = [parameter_handler]
toolkit_handler_with_metadata = [
p for p in parameter_handlers if type(p) == ElectrostaticsHandler
][0]
handler = cls(
type=toolkit_handler_with_metadata._TAGNAME,
scale_13=toolkit_handler_with_metadata.scale13,
scale_14=toolkit_handler_with_metadata.scale14,
scale_15=toolkit_handler_with_metadata.scale15,
cutoff=toolkit_handler_with_metadata.cutoff,
method=toolkit_handler_with_metadata.method.lower(),
)
handler.store_matches(parameter_handlers, topology)
return handler
def _from_toolkit_virtual_sites(
self,
parameter_handler: "VirtualSiteHandler",
topology: "Topology",
):
# TODO: Merge this logic into _from_toolkit
if not all(
isinstance(
p,
(
VirtualSiteHandler.VirtualSiteBondChargeType,
VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
VirtualSiteHandler.VirtualSiteDivalentLonePairType,
VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
),
)
for p in parameter_handler.parameters
):
raise NotImplementedError("Found unsupported virtual site types")
matches = parameter_handler.find_matches(topology)
for atom_indices, parameter_match in matches.items():
virtual_site_type = parameter_match[0].parameter_type
virtual_site_key = VirtualSiteKey(
atom_indices=atom_indices,
type=virtual_site_type.type,
match=virtual_site_type.match,
)
virtual_site_potential_key = PotentialKey(
id=virtual_site_type.smirks,
associated_handler="VirtualSiteHandler",
)
virtual_site_potential = Potential(
parameters={
"charge_increments": from_openmm(
virtual_site_type.charge_increment
),
}
)
matches = {}
potentials = {}
self.slot_map.update({virtual_site_key: virtual_site_potential_key})
self.potentials.update({virtual_site_potential_key: virtual_site_potential})
# TODO: Counter-intuitive that toolkit regression tests pass by using the counter
# variable i as if it was the atom index - shouldn't it just use atom_index?
for i, atom_index in enumerate(atom_indices): # noqa
topology_key = TopologyKey(atom_indices=(i,), mult=2)
potential_key = PotentialKey(
id=virtual_site_type.smirks,
mult=i,
associated_handler="VirtualSiteHandler",
)
charge_increment = getattr(
virtual_site_type, f"charge_increment{i + 1}"
)
potential = Potential(
parameters={"charge_increment": from_openmm(charge_increment)}
)
matches[topology_key] = potential_key
potentials[potential_key] = potential
self.slot_map.update(matches)
self.potentials.update(potentials)
@classmethod
@functools.lru_cache(None)
def _compute_partial_charges(cls, molecule: Molecule, method: str) -> unit.Quantity:
"""Call out to the toolkit's toolkit wrappers to generate partial charges."""
molecule = copy.deepcopy(molecule)
molecule.assign_partial_charges(method)
return from_openmm(molecule.partial_charges)
@classmethod
def _library_charge_to_potentials(
cls,
atom_indices: Tuple[int, ...],
parameter: LibraryChargeHandler.LibraryChargeType,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Map a matched library charge parameter to a set of potentials.
"""
matches = {}
potentials = {}
for i, (atom_index, charge) in enumerate(zip(atom_indices, parameter.charge)):
topology_key = TopologyKey(atom_indices=(atom_index,))
potential_key = PotentialKey(
id=parameter.smirks, mult=i, associated_handler="LibraryCharges"
)
potential = Potential(parameters={"charge": from_openmm(charge)})
matches[topology_key] = potential_key
potentials[potential_key] = potential
return matches, potentials
@classmethod
def _charge_increment_to_potentials(
cls,
atom_indices: Tuple[int, ...],
parameter: ChargeIncrementModelHandler.ChargeIncrementType,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Map a matched charge increment parameter to a set of potentials.
"""
matches = {}
potentials = {}
for i, atom_index in enumerate(atom_indices):
topology_key = TopologyKey(atom_indices=(atom_index,))
potential_key = PotentialKey(
id=parameter.smirks, mult=i, associated_handler="ChargeIncrementModel"
)
# TODO: Handle the cases where n - 1 charge increments have been defined,
# maybe by implementing this in the TK?
charge_increment = getattr(parameter, f"charge_increment{i + 1}")
potential = Potential(
parameters={"charge_increment": from_openmm(charge_increment)}
)
matches[topology_key] = potential_key
potentials[potential_key] = potential
return matches, potentials
@classmethod
def _find_slot_matches(
cls,
parameter_handler: Union["LibraryChargeHandler", "ChargeIncrementModelHandler"],
reference_molecule: Molecule,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Construct a slot and potential map for a slot based parameter handler.
"""
# Ideally this would be made redundant by OpenFF TK #971
unique_parameter_matches = {
tuple(sorted(key)): (key, val)
for key, val in parameter_handler.find_matches(
reference_molecule.to_topology()
).items()
}
parameter_matches = {key: val for key, val in unique_parameter_matches.values()}
matches, potentials = {}, {}
for key, val in parameter_matches.items():
parameter = val.parameter_type
if isinstance(parameter_handler, LibraryChargeHandler):
(
parameter_matches,
parameter_potentials,
) = cls._library_charge_to_potentials(key, parameter)
elif isinstance(parameter_handler, ChargeIncrementModelHandler):
(
parameter_matches,
parameter_potentials,
) = cls._charge_increment_to_potentials(key, parameter)
else:
raise NotImplementedError()
matches.update(parameter_matches)
potentials.update(parameter_potentials)
return matches, potentials
@classmethod
def _find_am1_matches(
cls,
parameter_handler: Union["ToolkitAM1BCCHandler", ChargeIncrementModelHandler],
reference_molecule: Molecule,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""Construct a slot and potential map for a charge model based parameter handler."""
reference_molecule = copy.deepcopy(reference_molecule)
reference_smiles = reference_molecule.to_smiles(
isomeric=True, explicit_hydrogens=True, mapped=True
)
method = getattr(parameter_handler, "partial_charge_method", "am1bcc")
partial_charges = cls._compute_partial_charges(
reference_molecule, method=method
)
matches = {}
potentials = {}
for i, partial_charge in enumerate(partial_charges):
potential_key = PotentialKey(
id=reference_smiles, mult=i, associated_handler="ToolkitAM1BCC"
)
potentials[potential_key] = Potential(parameters={"charge": partial_charge})
matches[TopologyKey(atom_indices=(i,))] = potential_key
return matches, potentials
@classmethod
def _find_reference_matches(
cls,
parameter_handlers: Dict[str, "ElectrostaticsHandlerType"],
reference_molecule: Molecule,
) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
"""
Construct a slot and potential map for a particular reference molecule and set of parameter handlers.
"""
matches = {}
potentials = {}
expected_matches = {i for i in range(reference_molecule.n_atoms)}
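# handlers are tried in precedence order; the first one that assigns charges to every atom of the molecule is kept (see the break at the end of the loop)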
for handler_type in cls.parameter_handler_precedence():
if handler_type not in parameter_handlers:
continue
parameter_handler = parameter_handlers[handler_type]
slot_matches, am1_matches = None, None
slot_potentials: Dict = {}
am1_potentials: Dict = {}
if handler_type in ["LibraryCharges", "ChargeIncrementModel"]:
slot_matches, slot_potentials = cls._find_slot_matches(
parameter_handler, reference_molecule
)
if handler_type in ["ToolkitAM1BCC", "ChargeIncrementModel"]:
am1_matches, am1_potentials = cls._find_am1_matches(
parameter_handler, reference_molecule
)
if slot_matches is None and am1_matches is None:
raise NotImplementedError()
elif slot_matches is not None and am1_matches is not None:
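                # Both an AM1-based charge set and slot-based charge increments matched:
                # keep both contributions and distinguish them by mult
                # (AM1 charges get mult=0, slot/increment charges get mult=1).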
am1_matches = {
TopologyKey(
atom_indices=topology_key.atom_indices, mult=0
): potential_key
for topology_key, potential_key in am1_matches.items()
}
slot_matches = {
TopologyKey(
atom_indices=topology_key.atom_indices, mult=1
): potential_key
for topology_key, potential_key in slot_matches.items()
}
matched_atom_indices = {
index for key in slot_matches for index in key.atom_indices
}
matched_atom_indices.intersection_update(
{index for key in am1_matches for index in key.atom_indices}
)
elif slot_matches is not None:
matched_atom_indices = {
index for key in slot_matches for index in key.atom_indices
}
else:
matched_atom_indices = {
index for key in am1_matches for index in key.atom_indices # type: ignore[union-attr]
}
if matched_atom_indices != expected_matches:
# Handle the case where a handler could not fully assign the charges
# to the whole molecule.
continue
matches.update(slot_matches if slot_matches is not None else {})
matches.update(am1_matches if am1_matches is not None else {})
potentials.update(slot_potentials)
potentials.update(am1_potentials)
break
found_matches = {index for key in matches for index in key.atom_indices}
if found_matches != expected_matches:
raise RuntimeError(
f"{reference_molecule.to_smiles(explicit_hydrogens=False)} could "
f"not be fully assigned charges."
)
return matches, potentials
def store_matches(
self,
parameter_handler: Union[
"ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
],
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""
Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
"""
# Reshape the parameter handlers into a dictionary for easier referencing.
parameter_handlers = {
handler._TAGNAME: handler
for handler in (
parameter_handler
if isinstance(parameter_handler, list)
else [parameter_handler]
)
}
self.potentials = dict()
self.slot_map = dict()
reference_molecules = [*topology.reference_molecules]
for reference_molecule in reference_molecules:
matches, potentials = self._find_reference_matches(
parameter_handlers, reference_molecule
)
match_mults = defaultdict(set)
for top_key in matches:
match_mults[top_key.atom_indices].add(top_key.mult)
self.potentials.update(potentials)
for top_mol in topology._reference_molecule_to_topology_molecules[
reference_molecule
]:
for topology_particle in top_mol.atoms:
reference_index = topology_particle.atom.molecule_particle_index
topology_index = topology_particle.topology_particle_index
for mult in match_mults[(reference_index,)]:
top_key = TopologyKey(atom_indices=(topology_index,), mult=mult)
self.slot_map[top_key] = matches[
TopologyKey(atom_indices=(reference_index,), mult=mult)
]
def store_potentials(
self,
parameter_handler: Union[
"ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
],
) -> None:
"""
Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
"""
# This logic is handled by ``store_matches`` as we may need to create potentials
# to store depending on the handler type.
pass
class SMIRNOFFVirtualSiteHandler(SMIRNOFFPotentialHandler):
"""
A handler which stores the information necessary to construct virtual sites (virtual particles).
"""
type: Literal["Bonds"] = "Bonds"
expression: Literal[""] = ""
virtual_site_key_topology_index_map: Dict["VirtualSiteKey", int] = Field(
dict(),
description="A mapping between VirtualSiteKey objects (stored analogously to TopologyKey objects"
"in other handlers) and topology indices describing the associated virtual site",
)
exclusion_policy: Literal["parents"] = "parents"
@classmethod
def allowed_parameter_handlers(cls):
"""Return a list of allowed types of ParameterHandler classes."""
return [VirtualSiteHandler]
@classmethod
def supported_parameters(cls):
"""Return a list of parameter attributes supported by this handler."""
return ["distance", "outOfPlaneAngle", "inPlaneAngle"]
def store_matches(
self,
parameter_handler: ParameterHandler,
topology: Union["Topology", "_OFFBioTop"],
) -> None:
"""
Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey].
Differs from SMIRNOFFPotentialHandler.store_matches because each key
can point to multiple potentials (?); each value in the dict is a
list of parametertypes, whereas conventional handlers don't have lists
"""
virtual_site_index = topology.n_topology_atoms
parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
if self.slot_map:
self.slot_map = dict()
matches = parameter_handler.find_matches(topology)
for key, val_list in matches.items():
for val in val_list:
virtual_site_key = VirtualSiteKey(
atom_indices=key,
type=val.parameter_type.type,
match=val.parameter_type.match,
)
potential_key = PotentialKey(
id=val.parameter_type.smirks,
associated_handler=parameter_handler_name,
)
self.slot_map[virtual_site_key] = potential_key
self.virtual_site_key_topology_index_map[
virtual_site_key
] = virtual_site_index
virtual_site_index += 1
def store_potentials(self, parameter_handler: ParameterHandler) -> None:
"""Store VirtualSite-specific parameter-like data."""
if self.potentials:
self.potentials = dict()
for potential_key in self.slot_map.values():
smirks = potential_key.id
parameter_type = parameter_handler.get_parameter({"smirks": smirks})[0]
potential = Potential(
parameters={
"distance": parameter_type.distance,
},
)
for attr in ["outOfPlaneAngle", "inPlaneAngle"]:
if hasattr(parameter_type, attr):
potential.parameters.update(
{attr: from_openmm(getattr(parameter_type, attr))}
)
self.potentials[potential_key] = potential
def _get_local_frame_weights(self, virtual_site_key: "VirtualSiteKey"):
if virtual_site_key.type == "BondCharge":
origin_weight = [1.0, 0.0]
x_direction = [-1.0, 1.0]
y_direction = [-1.0, 1.0]
elif virtual_site_key.type == "MonovalentLonePair":
origin_weight = [1, 0.0, 0.0]
x_direction = [-1.0, 1.0, 0.0]
y_direction = [-1.0, 0.0, 1.0]
elif virtual_site_key.type == "DivalentLonePair":
origin_weight = [0.0, 1.0, 0.0]
x_direction = [0.5, -1.0, 0.5]
y_direction = [1.0, -1.0, 1.0]
elif virtual_site_key.type == "TrivalentLonePair":
origin_weight = [0.0, 1.0, 0.0, 0.0]
x_direction = [1 / 3, -1.0, 1 / 3, 1 / 3]
y_direction = [1.0, -1.0, 0.0, 0.0]
return origin_weight, x_direction, y_direction
def _get_local_frame_position(self, virtual_site_key: "VirtualSiteKey"):
potential_key = self.slot_map[virtual_site_key]
potential = self.potentials[potential_key]
if virtual_site_key.type == "BondCharge":
distance = potential.parameters["distance"]
local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
elif virtual_site_key.type == "MonovalentLonePair":
distance = potential.parameters["distance"]
theta = potential.parameters["inPlaneAngle"].m_as(unit.radian) # type: ignore
psi = potential.parameters["outOfPlaneAngle"].m_as(unit.radian) # type: ignore
factor = np.array(
                [np.cos(theta) * np.cos(psi), np.sin(theta) * np.cos(psi), np.sin(psi)]
            )
"""
MIT License
Copyright (c) 2021 Overcomer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pickle
import logging
import cv2
import numpy
from skimage.feature import local_binary_pattern as LBP
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers, regularizers
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from .openface import OpenFace
from new_tools import check_image
class LBPCNN(OpenFace):
def __init__(self):
"""
Init label_encoder and model.
"""
self.label_encoder = None
self.model = None
def build_LBPCNN_model(self,
name="LBPCNN",
input_shape=(256, 256, 1),
classes=18,
learning_rate=2.5e-4):
"""
LBPCNN architecture.
Args
----
name: Specify CNN model name.
        input_shape: Specify LBPCNN input shape. Default 256x256x1 (local binary patterns image).
        classes: Specify the number of classes.
learning_rate: Specify the learning rate of Adam optimizer.
"""
model = Sequential(name=name)
model.add(layers.Conv2D(filters=40, kernel_size=(3, 3), padding="same", input_shape=input_shape, activation="relu", name="cnn1_1"))
model.add(layers.Conv2D(filters=40, kernel_size=(3, 3), padding="same", activation="relu", name="cnn1_2"))
model.add(layers.BatchNormalization(name="bn1"))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, name="mp1"))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu", name="cnn2_1"))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu", name="cnn2_2"))
model.add(layers.BatchNormalization(name="bn2"))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, name="mp2"))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same", activation="relu", name="cnn3_1"))
model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same", activation="relu", name="cnn3_2"))
model.add(layers.BatchNormalization(name="bn3"))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, name="mp3"))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu", name="cnn4_1"))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu", name="cnn4_2"))
model.add(layers.BatchNormalization(name="bn4"))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, name="mp4"))
model.add(layers.Conv2D(filters=40, kernel_size=(3, 3), padding="same", activation="relu", name="cnn5_1"))
model.add(layers.Conv2D(filters=40, kernel_size=(3, 3), padding="same", activation="relu", name="cnn5_2"))
model.add(layers.BatchNormalization(name="bn5"))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, name="mp5"))
model.add(layers.Flatten(name="flatten"))
model.add(layers.Dense(256, activation="relu", name="fc1"))
model.add(layers.Dropout(0.25, name="dp1"))
model.add(layers.BatchNormalization(name="bn6"))
model.add(layers.Dense(128, kernel_regularizer=regularizers.L1L2(0.0001), bias_regularizer=regularizers.L1L2(0.0001), activation="relu", name="fc2"))
model.add(layers.Dropout(0.25, name="dp2"))
model.add(layers.BatchNormalization(name="bn7"))
model.add(layers.Dense(classes, activation="softmax", name="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=Adam(learning_rate=learning_rate),
metrics=["accuracy"])
model.summary()
self.model = model
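    # Illustrative usage (added note, not part of the original class): with the defaults
    # above one would typically do
    #   recognizer = LBPCNN()
    #   recognizer.build_LBPCNN_model(input_shape=(256, 256, 1), classes=18)
    # before training or loading weights.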
def load_model(self,
label_encoder_path=str(),
model_path=str()):
"""
        Load LBPCNN model from a saved model. You need to specify the model directory path.
Args
----
label_encoder_path: Specify label encoder path.
model_path: Specify LBPCNN model path.
"""
logging.info("Loading LBPCNN model...")
if not os.path.exists(label_encoder_path):
logging.critical("'{}' error ! Loaded LBPCNN label encoder failed.".format(label_encoder_path))
raise FileNotFoundError
if not os.path.exists(model_path):
logging.critical("'{}' error ! Loaded LBPCNN model failed.".format(model_path))
raise FileNotFoundError
with open(label_encoder_path, "rb") as lab:
self.label_encoder = pickle.load(lab)
self.model = load_model(model_path)
logging.info("Loaded LBPCNN model successfully !")
def train_model(self,
images,
labels,
validation_data=tuple(),
model=Sequential(),
epochs=200,
batch_size=128,
verbose=1,
time_record=str(),
save_path=str()):
"""
Train LBPCNN model.
Args
----
images: Training images array.
labels: Training labels.
validation_data: Input validation images array and validation labels.
model: Sequential model instance.
        epochs: Number of training epochs.
        batch_size: Number of images per training step. Use a power of 2, e.g. 2, 4, 8, ..., 128.
        verbose: Verbosity level for real-time training progress.
        time_record: Timestamp used as a prefix for the training output directories.
save_path: Save model root directory.
Return
------
train_history: LBPCNN model training history.
"""
logging.info("Training LBPCNN model...")
root_path = os.path.join(save_path, "{}".format(time_record))
logging.info("Building LBPCNN model saved path to {}...".format(root_path))
        if not os.path.exists(root_path):
os.makedirs(root_path)
plot_image = os.path.join(root_path, "{}_LBPCNN_model_architecture.png".format(time_record))
logging.info("Saving LBPCNN model architecture image to {}...".format(plot_image))
plot_model(model,
to_file=plot_image,
show_shapes=True,
show_layer_names=True)
log_dir = os.path.join(root_path, "logs/fit/{}".format(time_record))
logging.info("Building tensorboard log directory to {}...".format(log_dir))
os.makedirs(log_dir)
tensorboard_callback = TensorBoard(log_dir=log_dir,
histogram_freq=1,
write_graph=True,
write_images=True,
embeddings_freq=False)
checkpoint_dir = os.path.join(root_path, "{}_best_checkpoint".format(time_record))
logging.info("Building checkpoint directory to {}...".format(checkpoint_dir))
os.makedirs(checkpoint_dir)
checkpoint_filepath = os.path.join(checkpoint_dir, "lbpcnn_{epoch:04d}.ckpt")
checkpoint_callback = ModelCheckpoint(filepath=checkpoint_filepath,
monitor='val_loss',
mode='min',
verbose=0,
save_best_only=True)
train_history = model.fit(x=images,
y=labels,
validation_data=validation_data,
epochs=epochs,
batch_size=batch_size,
verbose=verbose,
callbacks=[tensorboard_callback, checkpoint_callback])
logging.info("Saved LBPCNN model to {}.".format(root_path))
logging.info("Finished LBPCNN model training !")
return train_history
def predict(self,
image,
LBP_sample_point=8,
LBP_radius=2,
LBP_method="uniform"):
"""
Predict image.
Args
----
image: BGR image format.
LBP_sample_point: Local binary patterns sample points. Default 8 point.
LBP_radius: Local binary patterns radius. Default 2.
LBP_method: Local binary patterns method. Default uniform method.
Return
------
pred_id: Prediction index.
pred_proba: Prediction probability.
"""
pred_id = None
pred_proba = None
state, image = check_image(image)
if state == 0:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
lbp_image = LBP(gray_image, P=LBP_sample_point, R=LBP_radius, method=LBP_method)
            expand_lbp_image = numpy.expand_dims(lbp_image, axis=0)
import numpy as np
import texttable as tt
from scipy import linalg
import sys
import random
def is_close(float1, float2, epsilon=1e-6):
return abs(float1 - float2) <= epsilon
def f1(x):
return 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
def f2(x):
return (x[0] - 4) ** 2 + 4 * (x[1] - 2) ** 2
def f3(x):
return (x[0] - 2) ** 2 + (x[1] + 3) ** 2
def f4(x):
return (x[0] - 3) ** 2 + x[1] ** 2
# Enables us to not repeat calculations and keep track of number of evals
class GoalFunction:
def __init__(self, function, start=None):
self.f = function
self.start = np.array(start)
self.count = 0
self.store = dict()
def eval(self, x):
if str(x) not in self.store.keys():
self.store[str(x)] = self.f(x)
self.count += 1
return self.store[str(x)]
def reset(self):
self.count = 0
self.store = dict()
# Wraps GoalFunction in order to search along an axis
# e_i is direction vector
class LambdaMinWrapper:
def __init__(self, gf, x, e_i):
self.gf = gf
self.x = x
self.e_i = e_i
def eval(self, L):
return self.gf.eval(self.x + L * self.e_i)
'''
Procedure for finding a unimodal interval
Inputs:
- gf: goal function (a GoalFunction instance)
- tocka: starting point of the search
- h: search step
Outputs:
- unimodal interval [l, r]
'''
def get_unimodal_interval(gf, tocka, h=1):
step = 1
l, m, r = float(tocka) - h, float(tocka), float(tocka) + h
fl, fm, fr = gf.eval(l), gf.eval(m), gf.eval(r)
if (fm < fr) and (fm < fl):
return [float(l), float(r)]
elif fm > fr:
while fm > fr:
l = float(m)
m = float(r)
fm = float(fr)
step *= 2
r = float(tocka) + h * step
fr = gf.eval(r)
else:
while fm > fl:
r = float(m)
m = float(l)
fm = float(fl)
step *= 2
l = float(tocka) - h * step
fl = gf.eval(l)
return [float(l), float(r)]
'''
Golden section search algorithm
Inputs:
- a, b: initial bounds of the unimodal interval
- e: precision
If either a or b is None, get_unimodal_interval is called first
'''
def golden_section_search(gf, a=None, b=None, e=1e-6):
if a is None:
a, b = get_unimodal_interval(gf, b, 1)
elif b is None:
a, b = get_unimodal_interval(gf, a, 1)
k = 0.5 * (np.sqrt(5) - 1)
c = b - k * (b - a)
d = a + k * (b - a)
fc = gf.eval(c)
fd = gf.eval(d)
while (b - a) > e:
if fc < fd:
b = float(d)
d = float(c)
c = b - k * (b - a)
fd = float(fc)
fc = gf.eval(c)
else:
a = c
c = d
d = a + k * (b - a)
fc = fd
fd = gf.eval(d)
return (a + b) / 2
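# Illustrative usage (added sketch, not part of the original module): bracketing and
# refining the minimum of a simple 1-D quadratic with the two routines above.
# The wrapper name below is an assumption introduced for the example.
def _example_golden_section_usage():
    gf = GoalFunction(lambda x: (x - 3.0) ** 2 + 1.0)
    a, b = get_unimodal_interval(gf, 0.0, h=1)       # bracket the minimum starting from x=0
    x_min = golden_section_search(gf, a, b, e=1e-6)  # refine inside [a, b]
    return x_min, gf.eval(x_min), gf.count           # ~3.0, ~1.0, number of evaluations used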
'''
Nelder-Mead simplex algorithm (downhill simplex)
gf - goal function
x0 - starting point
step - step used to build the initial simplex
alpha - reflection coefficient
beta - contraction coefficient
gamma - expansion coefficient
sigma - shrink coefficient (move towards the best point)
max_iter - maximum number of iterations
'''
def nelder_mead(gf, x0, step=1, alpha=1, beta=0.5, gamma=2, sigma=0.5, e=1e-6, max_iter=10000):
tab = tt.Texttable()
tab.header(['Iteracija', 'Centroid', 'f(centroid)', 'Simplex'])
    # initial simplex
simplex, centroid = [x0], np.array([])
for i in range(len(x0)):
tocka = np.array(x0)
tocka[i] += step
simplex.append(tocka)
simplex = np.array(simplex)
for i in range(max_iter):
        # indices of the worst (largest) and best (smallest) function values
l, h = get_max_and_min(gf, simplex)
centroid = get_centroid(simplex, h)
tab.add_row([i, centroid, gf.eval(centroid), simplex])
reflected = reflection(simplex[h], centroid, alpha)
if gf.eval(reflected) < gf.eval(simplex[l]):
expanded = expansion(reflected, centroid, gamma)
if gf.eval(expanded) < gf.eval(simplex[l]):
simplex[h] = np.array(expanded)
else:
simplex[h] = np.array(reflected)
else:
            # if F(Xr) > F(X[j]) for every j = 0..n, j != h
condition = True
for j in range(simplex.shape[0]):
if j == h:
continue
if gf.eval(reflected) <= gf.eval(simplex[j]):
condition = False
break
            if condition is True:  # F(Xr) > F(X[j]) for every j = 0..n, j != h
if gf.eval(reflected) < gf.eval(simplex[h]):
simplex[h] = np.array(reflected)
contracted = contraction(simplex[h], centroid, beta)
if gf.eval(contracted) < gf.eval(simplex[h]):
simplex[h] = np.array(contracted)
else:
                    # move all points towards simplex[l]
simplex = move_all_to_lowest(simplex, l, sigma)
else:
simplex[h] = np.array(reflected)
# stop if stop value <= epsilon
stop_value = 0
for i in range(simplex.shape[0]):
stop_value += (gf.eval(simplex[i]) - gf.eval(centroid)) ** 2
stop_value = np.sqrt(stop_value / float(simplex.shape[0]))
if stop_value <= e:
print("[+] Cilj dostignut prije max_iter, stop_value ={0}!!".format(stop_value))
break
print(tab.draw())
# l, h = get_max_and_min(gf, simplex)
# return simplex[l]
return centroid
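# Illustrative usage (added sketch, not part of the original module): running the
# Nelder-Mead routine above on f2, whose minimum lies at (4, 2).
def _example_nelder_mead_usage():
    gf = GoalFunction(f2)
    x_opt = nelder_mead(gf, np.array([0.0, 0.0]), step=1, max_iter=1000)
    return x_opt, gf.eval(x_opt)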
# moves all simplex points towards the best (lowest) point
def move_all_to_lowest(simplex, l, sigma):
new_simplex = np.array([simplex[l]])
for i in range(simplex.shape[0]):
new_simplex = np.vstack([new_simplex, np.array([sigma * (simplex[i] + simplex[l])])])
return new_simplex[1:]
# takes the worst point and returns its reflection
def reflection(tocka, centroid, alpha):
return (1 + alpha) * centroid - alpha * tocka
# takes the reflected point and extends it in the direction of the centroid
def expansion(reflected, centroid, gamma):
return (1 - gamma) * centroid + gamma * reflected
# takes the worst point and moves it towards the centroid, shrinking the simplex
def contraction(tocka, centroid, beta):
return (1 - beta) * centroid + beta * tocka
# returns the centroid of all points except the skip_i-th one
def get_centroid(simplex, skip_i):
centroid = np.zeros(simplex.shape[1])
for i in range(simplex.shape[0]):
if i == skip_i:
continue
for j in range(simplex.shape[1]):
centroid[j] += simplex[i][j]
return np.true_divide(centroid, simplex.shape[0] - 1)
# returns the indices of the best and worst points
def get_max_and_min(gf, simplex):
l, h = 0, 0
max_value = gf.eval(simplex[0])
min_value = gf.eval(simplex[0])
for i in range(1, simplex.shape[0]):
value = gf.eval(simplex[i])
if value > max_value:
max_value = value
h = i
if value < min_value:
min_value = value
l = i
return l, h
'''
Hooke-Jeeves pattern search algorithm
x0 - starting point
xB - base point
xP - starting point of the exploratory search
xN - point obtained by the exploratory search
'''
def hooke_jeeves(gf, x0, dx=0.5, e=10e-6, max_iter=200):
tab = tt.Texttable()
tab.header(['Iteracija', 'Bazna', 'Pocetna', 'Explored'])
start = base = x0
for i in range(max_iter):
explored = explore(gf, start, dx=dx)
        if gf.eval(explored) < gf.eval(base):  # accept the new base point
            start = 2 * explored - base  # define a new search starting point
base = np.array(explored)
else:
dx /= 2.0
            start = np.array(base)  # go back to the last base point
tab.add_row([i, "f({0})={1}".format(base, gf.eval(base)),
"f({0})={1}".format(start, gf.eval(start)),
"f({0})={1}".format(explored, gf.eval(explored))])
        # stopping condition
if dx < e:
print("[+] Kraj prije max_iter, dx=", dx)
break
print(tab.draw())
if gf.eval(start) < gf.eval(base) and gf.eval(start) < gf.eval(explored):
return start
elif gf.eval(explored) < gf.eval(base) and gf.eval(explored) < gf.eval(start):
return explored
return base
# hooke jeeves helper functions
def explore(gf, tocka, dx=0.5):
x = np.array(tocka)
for i in range(x.shape[0]):
P = gf.eval(x)
x[i] = float(x[i]) + dx
N = gf.eval(x)
        if N > P:  # the positive step did not help
x[i] -= 2 * dx
N = gf.eval(x)
            if N > P:  # neither did the negative step
                x[i] += dx  # restore the original value
return x
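# Illustrative usage (added sketch, not part of the original module): Hooke-Jeeves
# pattern search on f3, whose minimum lies at (2, -3).
def _example_hooke_jeeves_usage():
    gf = GoalFunction(f3)
    x_opt = hooke_jeeves(gf, np.array([0.0, 0.0]), dx=0.5)
    return x_opt, gf.eval(x_opt)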
'''
Helper for gradient methods
'''
def approx_gradient(f, x, delta=1e-9):
gradients = []
for j in range(len(x)):
tmp_x1, tmp_x2 = list(x), list(x)
tmp_x1[j] += delta
tmp_x2[j] -= delta
gradient_approx = f.eval(tmp_x1) - f.eval(tmp_x2)
gradient_approx /= (2 * delta)
gradients.append(gradient_approx)
return gradients
'''
Gradient Descent with gradient calculation on the fly
function GoalFunction which we are minimizing
x Vector with start values
golden_section Finds optimal learning rate if True
'''
def gradient_descent(f, x, golden_section=True, epsilon=1e-9, rate=1, iterations=1000, delta=1e-9):
stuck_count, f_x, f_best = 0, 0, 10e18
for i in range(iterations):
if f.eval(x) >= f_best:
stuck_count += 1
else:
stuck_count = 0
if stuck_count == 100:
break
f_x = f.eval(x)
if f_x < f_best:
f_best = float(f_x)
print("{}: f({}): {}".format(i, x, f_x))
if abs(f_x) <= epsilon:
print("Success!")
break
gradients = approx_gradient(f, x, delta=delta)
if golden_section is False:
for j in range(len(x)):
x[j] -= rate * gradients[j]
else: # using golden section search to find optimal learning rate
for j in range(len(x)):
Lgf = LambdaMinWrapper(f, x, np.array(gradients))
unimodal = get_unimodal_interval(Lgf, 0)
L = golden_section_search(Lgf, unimodal[0], unimodal[1])
x[j] += L * gradients[j]
f_x = f.eval(x)
print("Final result: f({}): {}\n".format(x, f_x))
return x, f_x, f.count
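# Illustrative usage (added sketch, not part of the original module): gradient descent
# with the golden-section line search on f4, whose minimum lies at (3, 0).
def _example_gradient_descent_usage():
    gf = GoalFunction(f4)
    return gradient_descent(gf, [0.0, 0.0], golden_section=True, iterations=200)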
'''
Helper for Newton methods
'''
def hesse(f, x, delta=1e-6):
d = len(x)
grad_x = approx_gradient(f, x)
H = []
for i in range(len(grad_x)):
tmp_x1, tmp_x2 = list(x), list(x)
tmp_x1[i] += delta
tmp_x2[i] -= delta
sd1 = np.array(approx_gradient(f, tmp_x1))
sd2 = np.array(approx_gradient(f, tmp_x2))
second_derivative = (sd1 - sd2) / (2 * delta)
H.append(second_derivative)
return H
def supstitute_backward(U, y):
return linalg.solve(U, y)
def supstitute_forward(L, P, E):
N = L.shape[0]
PE = np.dot(P, E)
return linalg.solve(L, PE)
def inverse(m):
m = np.array(m)
E = np.zeros(m.shape)
for i in range(E.shape[0]):
E[i][i] = 1
P, L, U = linalg.lu(m)
X = []
for i in range(m.shape[0]):
y = supstitute_forward(L, P, E)
x = supstitute_backward(U, y)
X.append(x)
return np.array(X)
'''
Newton Raphson optimization method
function GoalFunction which we are minimizing
x Vector with start values
golden_section Finds optimal learning rate if True
'''
def newton_rhapson(f, x, epsilon=1e-6, iterations=1000):
counters = {'hesse': 0, 'gradient': 0, "f_evals": 0}
for i in range(iterations):
f_x = f.eval(x)
print("{}: f({}): {}".format(i, x, f_x))
if abs(f_x) <= epsilon:
print("Success!")
break
gradient = np.array(approx_gradient(f, x))
H = np.array(hesse(f, x))
counters['hesse'] += 1
counters['gradient'] += 2 * len(gradient) + 1
print("Hesse:\n", H)
try:
step = np.dot(inverse(H), gradient)[0][:]
Lgf = LambdaMinWrapper(f, x, step)
unimodal = get_unimodal_interval(Lgf, 0)
L = golden_section_search(Lgf, unimodal[0], unimodal[1])
x = x + L * step
except np.linalg.LinAlgError as e:
print(str(e))
print("\nCannot find inverse of hesse matrix\n")
return "FAILED"
f_x = f.eval(x)
print("Final result: f({}): {}\n".format(x, f_x))
counters['f_evals'] = f.count
return x, f_x, counters
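# Illustrative usage (added sketch, not part of the original module): Newton-Raphson
# with the numerical gradient and Hessian defined above, applied to f2.
# Returns (x, f(x), counters), or "FAILED" when the Hessian cannot be inverted.
def _example_newton_rhapson_usage():
    gf = GoalFunction(f2)
    return newton_rhapson(gf, np.array([0.0, 0.0]), iterations=50)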
'''
Helper method used with constrained optimization methods:
Box Algorithm
Mixed-Transformation Algorithm
Implicit limitations hardcoded:
(x2-x1 >= 0), (2-x1 >= 0)
'''
def check_implicit_limitations(x):
if x[1] - x[0] >= 0 and 2 - x[0] >= 0:
return True
else:
return False
'''
Helper for Box
returns indices of two worst points
'''
def get_worst_indices(simplex, f):
to_be_sorted = []
for i, x in enumerate(simplex):
to_be_sorted.append([i, f.eval(x)])
to_be_sorted = sorted(to_be_sorted, key=lambda x: x[1])
return to_be_sorted[-1][0], to_be_sorted[-2][0]
'''
Helper for Box
returns centroid of given points
'''
def centroid(simplex):
N = len(simplex[0])
    xc = np.zeros(N)
from astropy.io import fits
import pandas as pd
import numpy as np
from lightkurve import KeplerLightCurve
def find_flares(flux, error, N1=4, N2=4, N3=3):
'''
The algorithm for local changes due to flares defined by
<NAME> et al. (2015), Eqn. 3a-d
http://arxiv.org/abs/1510.01005
Note: these equations were originally in magnitude units, i.e. smaller
values are increases in brightness. The signs have been changed, but
coefficients have not been adjusted to change from log(flux) to flux.
Parameters:
----------
flux : numpy array
data to search over
error : numpy array
errors corresponding to data.
N1 : int, optional
Coefficient from original paper (Default is 3 in paper, 4 here)
How many times above the stddev is required.
N2 : int, optional
Coefficient from original paper (Default is 1 in paper, 4 here)
How many times above the stddev and uncertainty is required
N3 : int, optional
Coefficient from original paper (Default is 3)
The number of consecutive points required to flag as a flare
Return:
------------
isflare : numpy array of booleans
datapoints are flagged with 1 if they belong to a flare candidate
'''
median = np.nanmedian(flux)
sigma = np.nanstd(flux)
T0 = flux - median # excursion should be positive #"N0"
T1 = np.abs(flux - median) / sigma #N1
T2 = np.abs(flux - median - error) / sigma #N2
# apply thresholds N0-N2:
    pass_thresholds = np.where((T0 > 0) & (T1 > N1) & (T2 > N2))
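# Sketch (added here; the original function is truncated above): one way to apply the
# N3 consecutive-candidate rule from the docstring to the thresholded indices. The
# helper name and exact run handling are assumptions, not the original implementation.
def _candidates_to_flare_mask(flux, pass_thresholds, N3=3):
    is_candidate = np.zeros(len(flux), dtype=bool)
    is_candidate[pass_thresholds] = True
    isflare = np.zeros(len(flux), dtype=bool)
    i = 0
    while i < len(flux):
        if is_candidate[i]:
            j = i
            while j < len(flux) and is_candidate[j]:
                j += 1
            if (j - i) >= N3:  # keep only runs of at least N3 consecutive candidates
                isflare[i:j] = True
            i = j
        else:
            i += 1
    return isflare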
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------
# Filename: core.py
# Purpose: Expansion of the obspy.core.stream module
# Author: uquake development team
# Email: <EMAIL>
#
# Copyright (C) 2016 uquake development team
# --------------------------------------------------------------------
"""
Expansion of the obspy.core.stream module
:copyright:
uquake development team (<EMAIL>)
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from abc import ABC
from io import BytesIO
import numpy as np
import obspy.core.stream as obsstream
from pkg_resources import load_entry_point
from .trace import Trace
from .util import ENTRY_POINTS, tools
from .logging import logger
from pathlib import Path
class Stream(obsstream.Stream, ABC):
__doc__ = obsstream.Stream.__doc__.replace('obspy', 'uquake')
def __init__(self, stream=None, **kwargs):
super(Stream, self).__init__(**kwargs)
if stream:
traces = []
for tr in stream.traces:
traces.append(Trace(trace=tr))
self.traces = traces
def composite(self):
"""
        Returns a new stream object containing a composite trace for each station.
        The amplitude of a composite trace is the norm of the amplitudes of the
        traces of all components, and its phase (sign) is the sign of the first
        component of the given station.
:param st: a stream object
:type st: ~uquake.core.stream.Stream
:rtype: ~uquake.core.stream.Stream
"""
return composite_traces(self)
def select(self, **kwargs):
if 'site' in kwargs.keys():
trs = [tr for tr in self.traces if tr.stats.site == kwargs['site']]
else:
return super().select(**kwargs)
st_tmp = Stream(traces=trs)
kwargs_tmp = {}
for key in kwargs.keys():
if key == 'site':
continue
kwargs_tmp[key] = kwargs[key]
return st_tmp.select(**kwargs_tmp)
def as_array(self, wlen_sec=None, taplen=0.05):
t0 = np.min([tr.stats.starttime for tr in self])
sr = self[0].stats.sampling_rate
if wlen_sec is not None:
npts_fix = int(wlen_sec * sr)
else:
npts_fix = int(np.max([len(tr.data) for tr in self]))
return tools.stream_to_array(self, t0, npts_fix, taplen=taplen), sr, t0
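    # channel_map assigns one integer id per unique station name, so chan_groups can
    # return, for each station, the indices of all traces that belong to it.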
def chan_groups(self):
chanmap = self.channel_map()
groups = [np.where(sk == chanmap)[0] for sk in np.unique(chanmap)]
return groups
def channel_map(self):
stations = np.array([tr.stats.station for tr in self])
unique = np.unique(stations)
unique_dict = dict(zip(unique, np.arange(len(unique))))
chanmap = np.array([unique_dict[chan] for chan in stations], dtype=int)
return chanmap
def write(self, filename, format='MSEED', **kwargs):
from six import string_types
f = filename
if isinstance(filename, string_types):
if filename.endswith('gz'):
import gzip
f = gzip.open(filename, 'w')
elif filename.endswith('bz2'):
import bz2
f = bz2.BZ2File(filename, 'w')
elif filename.endswith('zip'):
print('Zip protocol is not supported')
st_out = self.copy()
return obsstream.Stream.write(st_out, f, format, **kwargs)
write.__doc__ = obsstream.Stream.write.__doc__.replace('obspy',
'uquake')
def write_bytes(self):
buf = BytesIO()
self.write(buf, format='MSEED')
return buf.getvalue()
def valid(self, **kwargs):
return is_valid(self, return_stream=True)
def concat(self, comp_st):
c = (comp_st is not None)
if c:
for i, (t1, t2) in enumerate(zip(comp_st.traces, self.traces)):
self.detrend_norm(t2)
comp_st.traces[i] = t1.__add__(t2, method=1, fill_value=0)
else:
for t in self:
self.detrend_norm(t)
comp_st = self
return comp_st
@property
def unique_stations(self):
return np.sort(np.unique([tr.stats.station for tr in self]))
@property
def unique_sites(self):
return np.sort(np.unique([tr.stats.site for tr in self]))
@property
def stations(self):
return self.unique_stations
@property
def sites(self):
return self.unique_sites
def zpad_names(self):
for tr in self.traces:
tr.stats.station = tr.stats.station.zfill(3)
self.sort()
def zstrip_names(self):
for tr in self.traces:
tr.stats.station = tr.stats.station.lstrip('0')
def distance_time_plot(self, event, site, scale=20, freq_min=100,
freq_max=1000):
"""
plot traces that have
:param event: event object
:param site: site object
:param scale: vertical size of pick markers and waveform
:return: plot handler
"""
st = self.copy()
st.detrend('demean')
st.taper(max_percentage=0.01)
st.filter('bandpass', freqmin=freq_min, freqmax=freq_max)
import matplotlib.pyplot as plt
import numpy as np
# initializing the plot
ax = plt.subplot(111)
if event.preferred_origin():
origin = event.preferred_origin()
elif event.origins:
origin = event.origins[0]
else:
return
event_location = origin.loc
# find the earliest start time and latest end time
start_time = None
end_time = None
for tr in st:
if not start_time:
start_time = tr.stats.starttime
end_time = tr.stats.endtime
if tr.stats.starttime < start_time:
start_time = tr.stats.starttime
if tr.stats.endtime > end_time:
end_time = tr.stats.endtime
for tr in st:
station_code = tr.stats.station
# search for arrival
station = site.select(station_code).stations()[0]
station_location = station.loc
distance = np.linalg.norm(event_location - station_location)
p_pick = None
s_pick = None
data = (tr.data / np.max(np.abs(tr.data))) * scale
time_delta = tr.stats.starttime - start_time
time = np.arange(0, len(data)) / tr.stats.sampling_rate + \
time_delta
for arrival in origin.arrivals:
if arrival.get_pick().waveform_id.station_code == station_code:
distance = arrival.distance
if arrival.phase == 'P':
p_pick = arrival.get_pick().time - start_time
elif arrival.phase == 'S':
s_pick = arrival.get_pick().time - start_time
ax.plot(time, data + distance, 'k')
if p_pick:
ax.vlines(p_pick, distance - scale, distance + scale, 'r')
if s_pick:
ax.vlines(s_pick, distance - scale, distance + scale, 'b')
plt.xlabel('relative time (s)')
plt.ylabel('distance from event (m)')
@staticmethod
def create_from_json_traces(traces_json_list):
traces = []
# for tr_json in traces_json_list:
for i, tr_json in enumerate(traces_json_list):
stats = tr_json['stats']
tr = Trace.create_from_json(tr_json)
traces.append(tr)
return Stream(traces=traces)
def to_traces_json(self):
traces = []
for tr in self:
trout = tr.to_json()
traces.append(trout)
return traces
def plot(self, *args, **kwargs):
"""
see Obspy stream.plot()
"""
from ..imaging.waveform import WaveformPlotting
waveform = WaveformPlotting(stream=self, *args, **kwargs)
return waveform.plotWaveform(*args, **kwargs)
# from uquake.core import read, read_events
# from spp.utils import application
# app = application.Application()
# site = app.get_stations()
# st = read('2018-11-08T10:21:49.898496Z.mseed', format='mseed')
# cat = read_events('test.xml')
# evt = cat[0]
# st = st.composite()
def is_valid(st_in, return_stream=False, STA=0.005, LTA=0.1, min_num_valid=5):
"""
Determine if an event is valid or return valid traces in a stream
:param st_in: stream
:type st_in: uquake.core.stream.Stream
:param return_stream: return stream of valid traces if true else return
true if the event is valid
:type return_stream: bool
:param STA: short term average used to determine if an event is valid
:type STA: float
:param LTA: long term average
:type LTA: float
:param min_num_valid: minimum number of valid traces to declare the
event valid
:type min_num_valid: int
:rtype: bool or uquake.core.stream.Stream
"""
from scipy.ndimage.filters import gaussian_filter1d
from obspy.signal.trigger import recursive_sta_lta
st = st_in.copy()
st.detrend('demean').detrend('linear')
trstd = []
trmax = []
trs_out = []
st_comp = composite_traces(st)
for tr in st_comp:
if not np.any(tr.data):
continue
sampling_rate = tr.stats.sampling_rate
trstd.append(np.std(tr.data))
trmax.append(np.max(np.abs(tr.data)))
nsta = int(STA * sampling_rate)
nlta = int(LTA * sampling_rate)
cft = recursive_sta_lta(np.array(tr.data), nsta, nlta)
sfreq = tr.stats['sampling_rate']
sigma = sfreq / (2 * np.pi * 100)
cft = gaussian_filter1d(cft, sigma=sigma, mode='reflect')
try:
mx = np.r_[True, cft[1:] > cft[:-1]] & \
np.r_[cft[:-1] > cft[1:], True]
except Exception as e:
logger.error(e)
continue
i1 = np.nonzero(mx)[0]
i2 = i1[cft[i1] > np.max(cft) / 2]
        tspan = (np.max(i2) - np.min(i2))
""" This script is devoted to plot the method comparison between
1. Hessian Adam > Basin CMA, and normal Adam
Comparison between Hessian CMA and normal CMA.
1. Plot separating layers in CNN.
"""
#%%
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import sys
import os
from os.path import join
from time import time
from scipy.stats import ttest_ind, ttest_rel
import matplotlib.cm as cm
figdir = r"E:\OneDrive - Washington University in St. Louis\Hessian_summary\Figure5"
#%% Simple Adam VS Adam with Hessian basis
# Same result for ImageNet and BigGAN generated images
summarydir = r"E:\Cluster_Backup\BigGAN_invert\ImageNet\summary"
expalign_tab_imgnt = pd.read_csv(join(summarydir, "exprecord_align.csv"))
#%%
summarydir = r"E:\Cluster_Backup\BigGAN_invert\BigGAN_rnd\summary"
expalign_tab_rnd = pd.read_csv(join(summarydir, "exprecord_align.csv"))
#%%
method_list = ['CMA10Adam10Final500_postAdam_all', 'CMA10Adam10Final500_postAdam_none']
label_list = ["BasinCMA Hess", "BasinCMA none"]
savestr = "BasinCMA"
method_list = ['CMA50Adam0Final500_postAdam_all',
'CMA50Adam0Final500_postAdam_none',]
label_list = ["CMAAdam Hess", "CMAAdam none"]
savestr = "CMAAdam"
method_list = ['CMA1Adam30Final600_postAdam_all',
'CMA1Adam30Final600_postAdam_none',]
label_list = ["Adam Hess", "Adam none"]
savestr = "Adam"
nmeth = 2
imgnet_msk = (~ expalign_tab_imgnt[method_list[0]].isna()) * (~ expalign_tab_imgnt[method_list[1]].isna())
rand_msk = (~ expalign_tab_rnd[method_list[0]].isna()) * (~ expalign_tab_rnd[method_list[1]].isna())
imgnet_mean = [expalign_tab_imgnt[method][imgnet_msk].mean() for method in method_list]
imgnet_sem = [expalign_tab_imgnt[method][imgnet_msk].sem() for method in method_list]
rnd_mean = [expalign_tab_rnd[method][rand_msk].mean() for method in method_list]
rnd_sem = [expalign_tab_rnd[method][rand_msk].sem() for method in method_list]
#%
plt.figure(figsize=[4,3])
intvs = np.arange(nmeth)[:,np.newaxis]
nsamps = sum(imgnet_msk)
xjit = np.random.randn(1, nsamps) * 0.1
plt.plot(0.05 + intvs.repeat(nsamps, 1) + xjit, expalign_tab_imgnt[method_list][imgnet_msk].T,
color="gray", alpha=0.15)
intvs = np.arange(nmeth, 2*nmeth)[:, np.newaxis]
nsamps = sum(rand_msk)
xjit = np.random.randn(1, nsamps) * 0.1
plt.plot(0.05 + intvs.repeat(nsamps, 1) + xjit, expalign_tab_rnd[method_list][rand_msk].T,
color="gray", alpha=0.15)
plt.errorbar(range(nmeth), imgnet_mean, yerr=imgnet_sem, capthick=2, capsize=5, lw=3, alpha=0.7)
plt.errorbar(range(nmeth, 2*nmeth), rnd_mean, yerr=rnd_sem, capthick=2, capsize=5, lw=3, alpha=0.7)
plt.xticks(range(2*nmeth), ["ImageNet\n%s"% label for label in label_list] +
["BigGAN rand\n%s"%label for label in label_list])
# plt.xticks(range(2*nmeth), ["ImageNet\nBasinCMA Hess", "ImageNet\nBasinCMA none",] +
# ["BigGAN rand\nBasinCMA Hess", "BigGAN rand\nBasinCMA none"])
plt.ylabel("LPIPS Image Dist")
stat_imgnt = ttest_rel(expalign_tab_imgnt[method_list[0]], expalign_tab_imgnt[method_list[1]], nan_policy="omit")
stat_rand = ttest_rel(expalign_tab_rnd[method_list[0]], expalign_tab_rnd[method_list[1]], nan_policy="omit")
dof_imgnt = sum(imgnet_msk) - 1
dof_rand = sum(rand_msk) - 1
plt.title("ImageNet: t=%.1f p=%.1e(dof=%d)\n"
"BigGAN rand: t=%.1f p=%.1e(dof=%d)"%(stat_imgnt.statistic, stat_imgnt.pvalue, dof_imgnt,
stat_rand.statistic, stat_rand.pvalue, dof_rand, ))
plt.savefig(join(figdir, "%s_xspace_Hess_cmp.png"%savestr))
plt.savefig(join(figdir, "%s_xspace_Hess_cmp.pdf"%savestr))
# plt.savefig(join(figdir, "BasinCMA_xspace_Hess_cmp.png"))
# plt.savefig(join(figdir, "BasinCMA_xspace_Hess_cmp.pdf"))
# plt.savefig(join(figdir, "Adam_xspace_Hess_cmp.png"))
# plt.savefig(join(figdir, "Adam_xspace_Hess_cmp.pdf"))
plt.show()
#%%
#%%
def var_cmp_plot(var=['CMA10Adam10Final500_postAdam_all', 'CMA10Adam10Final500_postAdam_none',
'CMA50Adam0Final500_postAdam_all', 'CMA50Adam0Final500_postAdam_none'],
labels=['BasinCMA_all', 'BasinCMA_none', 'CMA-Adam_all', 'CMA-Adam_none'],
data=None, msk=None, jitter=False, cmap=cm.RdBu, titstr="",
):
"""Designed to plot paired scatter plot for discrete categories. Paired t test is performed at the end and stats
are returned.
Input is a pandas dataframe and variable names in it."""
varn = len(var)
clist = [cmap(float((vari + .5) / (varn + 1))) for vari in range(varn)]
fig, ax = plt.subplots(figsize=[6, 8])
xjit = np.random.randn(data.shape[0]) * 0.1 if jitter else np.zeros(data.shape[0])
for vari, varnm in enumerate(var):
plt.scatter(vari + 1 + xjit, data[varnm], s=9, color=clist[vari], alpha=0.6,
label=labels[vari])
plt.legend()
intvs = np.arange(varn).reshape(1, -1)
plt.plot(1 + intvs.repeat(data.shape[0], 0).T + xjit[np.newaxis, :], data[var].T,
color="gray", alpha=0.1)
plt.xticks(np.arange(len(labels))+1, labels)
stats = {}
stats["T01"] = ttest_rel(data[var[0]], data[var[1]], nan_policy='omit')
stats["T02"] = ttest_rel(data[var[0]], data[var[2]], nan_policy='omit')
stats["T12"] = ttest_rel(data[var[1]], data[var[2]], nan_policy='omit')
plt.title(
"%s\nT: %s - %s:%.1f(%.1e)\n%s - %s:%.1f(%.1e)\n"
"%s - %s:%.1f(%.1e)" % (titstr, labels[0], labels[1], stats["T01"].statistic, stats["T01"].pvalue,
labels[0], labels[2], stats["T02"].statistic, stats["T02"].pvalue,
labels[1], labels[2], stats["T12"].statistic, stats["T12"].pvalue))
return fig, stats
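# Illustrative usage (added sketch): running the paired comparison on one of the aligned
# tables loaded above; the default columns of var_cmp_plot are the BasinCMA/CMA variants.
def _example_var_cmp_usage(aligned_table):
    return var_cmp_plot(data=aligned_table, jitter=True, titstr="Hessian basis vs none")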
def var_stripe_plot(var=[], labels=[], jitter=False, cmap=cm.RdBu, titstr="", tests=[(0,1),(1,2),(2,3)], median=None):
"""Designed to plot paired scatter plot for discrete categories. Paired t test is performed at the end and stats
are returned.
Input is a pandas dataframe and variable names in it."""
varn = len(var)
clist = [cmap(float((vari + .5) / (varn + 1))) for vari in range(varn)]
fig, ax = plt.subplots(figsize=[6, 8])
for vari, varnm in enumerate(var):
xjit = np.random.randn(len(var[vari])) * 0.1 if jitter else np.zeros(len(var[vari]))
plt.scatter(vari + 1 + xjit, var[vari], s=9, color=clist[vari], alpha=0.6,
label=labels[vari])
plt.legend()
ticks = np.arange(varn).reshape(1, -1)
# plt.plot(1 + ticks.repeat(data.shape[0], 0).T + xjit[np.newaxis, :], data[var].T,
# color="gray", alpha=0.1)
plt.xticks(np.arange(len(labels))+1, labels)
stats = {}
medstr = ""
if median is None: median = list(range(varn))
for vari in median:
med = np.nanmedian(var[vari])
stats["M%d" % vari] = med
medstr += "%s:%.2f " % (labels[vari], med)
if (vari+1)%2==0: medstr+="\n"
statstr = ""
for pair in tests:
t_res = ttest_ind(var[pair[0]], var[pair[1]], nan_policy='omit')
stats["T%d%d" % pair] = t_res
statstr += "%s - %s:%.1f(%.1e)\n"%(labels[pair[0]], labels[pair[1]], t_res.statistic, t_res.pvalue)
plt.title(
"%s\nMed:%s\nT: %s" % (titstr, medstr, statstr))
return fig, stats
#%%
"""Activation Maximization Cmp"""
rootdir = r"E:\Cluster_Backup\BigGAN_Optim_Tune_new"
summarydir = join(rootdir, "summary")
exprec_tab = pd.read_csv(join(summarydir, "optim_raw_score_tab.csv"))
align_tab = pd.read_csv(join(summarydir, "optim_aligned_score_tab_BigGAN.csv"))
#%%
align_tab_FC6 = pd.read_csv(join(summarydir, "optim_aligned_score_tab_fc6.csv"))
#%%
layers = align_tab_FC6.layer.unique()
optim_list = ["HessCMA", 'CholCMA']
optim_list_fc6 = ['HessCMA500_1_fc6', "CholCMA_fc6"]
colorseq = [cm.jet(i/(len(layers)-1)) for i in range(len(layers))]
plt.figure(figsize=[4,6])
for Li, layer in enumerate(layers):
xjit = np.random.randn(1) * 0.08
msk = align_tab_FC6.layer==layer
optim_fc6_mean = [align_tab_FC6[optim][msk].mean() for optim in optim_list_fc6]
optim_fc6_sem = [align_tab_FC6[optim][msk].sem() for optim in optim_list_fc6]
plt.errorbar(xjit+np.arange(2,4), optim_fc6_mean, yerr=optim_fc6_sem,
capthick=2, capsize=5, lw=3, alpha=0.55, color=colorseq[Li])
msk = align_tab.layer==layer
optim_mean = [align_tab[optim][msk].mean() for optim in optim_list]
optim_sem = [align_tab[optim][msk].sem() for optim in optim_list]
plt.errorbar(xjit+np.arange(0,2), optim_mean, yerr=optim_sem,
capthick=2, capsize=5, lw=3, alpha=0.55, color=colorseq[Li])
optim_fc6_mean = [align_tab_FC6[optim].mean() for optim in optim_list_fc6]
optim_fc6_sem = [align_tab_FC6[optim].sem() for optim in optim_list_fc6]
plt.errorbar(np.arange(2, 4), optim_fc6_mean, yerr=optim_fc6_sem)
"""
Created on Wed Nov 7 20:18:05 2018
@author: Raneem
"""
import numpy as np
import random
def runOperators(population, fitness,
crossoverProbability, mutationProbability,
nChromosomes, nPoints):
"""
This is the main method where the evolutionary operators are called
Parameters
----------
population : list
The list of chromosomes
fitness : list
The list of fitness values for each chromosome
crossoverProbability : float
The probability of crossover
mutationProbability : float
The probability of mutation
    nChromosomes: int
        Number of chromosomes in a population
nPoints:
Number of points (instances) in the dataset
Returns
-------
list
newPopulation: the new generated population after applying the genetic operations
"""
#initialize a new population
newPopulation = [None] * nChromosomes
#Create pairs of parents. The number of pairs equals the number of chromosomes divided by 2
for i in range(0, nChromosomes, 2):
#pair of parents selection
parent1, parent2 = pairSelection(population, fitness, nChromosomes)
#crossover
crossoverLength = min(len(parent1), len(parent2))
parentsCrossoverProbability = random.uniform(0.0, 1.0)
if parentsCrossoverProbability < crossoverProbability:
offspring1, offspring2 = crossover(crossoverLength, parent1, parent2)
else:
offspring1 = parent1.copy()
offspring2 = parent2.copy()
#Mutation
offspringMutationProbability = random.uniform(0.0, 1.0)
if offspringMutationProbability < mutationProbability:
mutation(offspring1, len(offspring1), nPoints)
offspringMutationProbability = random.uniform(0.0, 1.0)
if offspringMutationProbability < mutationProbability:
mutation(offspring2, len(offspring2), nPoints)
#Add offsprings to population
newPopulation[i] = offspring1.copy()
newPopulation[i + 1] = offspring2.copy()
return newPopulation
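# Sketch (added here; the original helpers are defined elsewhere in the module): a
# one-point crossover consistent with the call signature crossover(length, p1, p2)
# used above. The function name is changed to make clear that it is an illustration.
def _example_one_point_crossover(individualLength, parent1, parent2):
    crossover_point = random.randint(0, individualLength - 1)
    offspring1 = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]])
    offspring2 = np.concatenate([parent2[:crossover_point], parent1[crossover_point:]])
    return offspring1, offspring2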
def elitism(population, fitness, labelsPred, bestChromosomeInAllGenerations,
bestFitnessInAllGenerations, bestLabelsPredInAllGenerations):
"""
This method performs the elitism operator
Parameters
----------
population : list
The list of chromosomes
fitness : list
The list of fitness values for each chromosome
labelPred : list
A list of predicted labels for the points for each chroomosome
bestChromosomeInAllGenerations : list
A chromosome of the previous generation having the best fitness value
bestFitnessInAllGenerations : float
The best fitness value of the previous generation
bestLabelsPredInAllGenerations :
A list of predicted labels for the previous generation having the best fitness value
Returns
-------
list
population : The updated population after applying the elitism
list
fitness : The updated list of fitness values for each chromosome after applying the elitism
list
labelsPred : The updated list of predicted labels for the points for each chroomosome after applying the elitism
list
bestChromosomeInAllGenerations : A chromosome of the current generation having the best fitness value
float
bestFitnessInAllGenerations : The best fitness value of the current generation
list
bestLabelsPredInAllGenerations : A list of predicted labels for the current generation having the best fitness value
"""
# get the worst chromosome
worstFitnessId = selectWorstChromosome(fitness)
    # replace the worst chromosome with the best one from the previous generation if the latter is better
if fitness[worstFitnessId] > bestFitnessInAllGenerations:
population[worstFitnessId] = bestChromosomeInAllGenerations.copy()
fitness[worstFitnessId] = bestFitnessInAllGenerations
labelsPred[worstFitnessId] = bestLabelsPredInAllGenerations.copy()
#update best chromosome
bestFitnessId = selectBestChromosome(fitness)
bestChromosomeInAllGenerations = population[bestFitnessId].copy()
bestFitnessInAllGenerations = fitness[bestFitnessId].copy()
bestLabelsPredInAllGenerations = labelsPred[bestFitnessId].copy()
return bestChromosomeInAllGenerations, bestFitnessInAllGenerations, bestLabelsPredInAllGenerations
def selectWorstChromosome(fitness):
"""
    It is used to get the worst chromosome in a population based on the fitness value
Parameters
----------
fitness : list
The list of fitness values for each chromosome
Returns
-------
int
maxFitnessId: The chromosome id of the worst fitness value
"""
    maxFitnessId = np.where(fitness == np.max(fitness))
    maxFitnessId = maxFitnessId[0][0]
    return maxFitnessId
import unittest
import numpy as np
import sympy
from sympy.abc import r, t, z
import discretize
from discretize import tests
np.random.seed(16)
TOL = 1e-1
# ----------------------------- Test Operators ------------------------------ #
MESHTYPES = ["uniformCylMesh", "randomCylMesh"]
call2 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, 2])
call3 = lambda fun, xyz: fun(xyz[:, 0], xyz[:, 1], xyz[:, 2])
cyl_row2 = lambda g, xfun, yfun: np.c_[call2(xfun, g), call2(yfun, g)]
cyl_row3 = lambda g, xfun, yfun, zfun: np.c_[
call3(xfun, g), call3(yfun, g), call3(zfun, g)
]
cylF2 = lambda M, fx, fy: np.vstack(
(cyl_row2(M.gridFx, fx, fy), cyl_row2(M.gridFz, fx, fy))
)
cylF3 = lambda M, fx, fy, fz: np.vstack(
(
cyl_row3(M.gridFx, fx, fy, fz),
cyl_row3(M.gridFy, fx, fy, fz),
cyl_row3(M.gridFz, fx, fy, fz),
)
)
cylE3 = lambda M, ex, ey, ez: np.vstack(
(
cyl_row3(M.gridEx, ex, ey, ez),
cyl_row3(M.gridEy, ex, ey, ez),
cyl_row3(M.gridEz, ex, ey, ez),
)
)
# class TestCellGradx3D(tests.OrderTest):
# name = "CellGradx"
# MESHTYPES = MESHTYPES
# meshDimension = 3
# meshSizes = [8, 16, 32, 64]
# def getError(self):
# fun = lambda r, t, z: (
# np.sin(2.*np.pi*r) + np.sin(t) + np.sin(2*np.pi*z)
# )
# solR = lambda r, t, z: 2.*np.pi*np.cos(2.*np.pi*r)
# phi = call3(fun, self.M.gridCC)
# phix_num = self.M.cellGradx * phi
# phix_ana = call3(solR, self.M.gridFx)
# err = np.linalg.norm(phix_num - phix_ana, np.inf)
# return err
# def test_order(self):
# self.orderTest()
class TestFaceDiv3D(tests.OrderTest):
name = "FaceDiv"
meshTypes = MESHTYPES
meshDimension = 3
meshSizes = [8, 16, 32, 64]
def getError(self):
funR = lambda r, t, z: np.sin(2.0 * np.pi * r)
funT = lambda r, t, z: r * np.exp(-r) * np.sin(t) # * np.sin(2.*np.pi*r)
funZ = lambda r, t, z: np.sin(2.0 * np.pi * z)
sol = lambda r, t, z: (
(2 * np.pi * r * np.cos(2 * np.pi * r) + np.sin(2 * np.pi * r)) / r
+ np.exp(-r) * np.cos(t)
+ 2 * np.pi * np.cos(2 * np.pi * z)
)
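        # sol is the analytic divergence in cylindrical coordinates:
        #   div F = (1/r) d(r F_r)/dr + (1/r) dF_theta/dtheta + dF_z/dz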
Fc = cylF3(self.M, funR, funT, funZ)
# Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
F = self.M.projectFaceVector(Fc)
divF = self.M.faceDiv.dot(F)
divF_ana = call3(sol, self.M.gridCC)
err = np.linalg.norm((divF - divF_ana), np.inf)
return err
def test_order(self):
self.orderTest()
class TestEdgeCurl3D(tests.OrderTest):
name = "edgeCurl"
meshTypes = MESHTYPES
meshDimension = 3
meshSizes = [8, 16, 32, 64]
def getError(self):
# use the same function in r, t, z
# need to pick functions that make sense at the axis of symmetry
# careful that r, theta contributions make sense at axis of symmetry
funR = lambda r, t, z: np.sin(2 * np.pi * z) * np.sin(np.pi * r) * np.sin(t)
funT = lambda r, t, z: np.cos(np.pi * z) * np.sin(np.pi * r) * np.sin(t)
funZ = lambda r, t, z: np.sin(np.pi * r) * np.sin(t)
derivR_t = lambda r, t, z: np.sin(2 * np.pi * z) * np.sin(np.pi * r) * np.cos(t)
derivR_z = (
lambda r, t, z: 2
* np.pi
* np.cos(2 * np.pi * z)
* np.sin(np.pi * r)
* np.sin(t)
)
derivT_r = (
lambda r, t, z: np.pi * np.cos(np.pi * z) * np.cos(np.pi * r) * np.sin(t)
)
derivT_z = (
lambda r, t, z: -np.pi * np.sin(np.pi * z) * np.sin(np.pi * r) * np.sin(t)
)
derivZ_r = lambda r, t, z: np.pi * np.cos(np.pi * r) * np.sin(t)
        derivZ_t = lambda r, t, z: np.sin(np.pi * r) * np.cos(t)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Process Hi-C output into AGP for chromosomal-scale scaffolding.
"""
from __future__ import print_function
import array
import json
import logging
import math
import os
import os.path as op
import sys
from collections import defaultdict
from functools import partial
from multiprocessing import Pool
import numpy as np
from jcvi.algorithms.ec import GA_run, GA_setup
from jcvi.algorithms.formula import outlier_cutoff
from jcvi.algorithms.matrix import get_signs
from jcvi.apps.base import ActionDispatcher, OptionParser, backup, iglob, mkdir, symlink
from jcvi.apps.console import green, red
from jcvi.apps.grid import Jobs
from jcvi.assembly.allmaps import make_movie
from jcvi.compara.synteny import check_beds, get_bed_filenames
from jcvi.formats.agp import order_to_agp
from jcvi.formats.base import LineFile, must_open
from jcvi.formats.bed import Bed
from jcvi.formats.blast import Blast
from jcvi.formats.sizes import Sizes
from jcvi.graphics.base import (
markup,
normalize_axes,
plt,
savefig,
ticker,
human_readable,
)
from jcvi.graphics.dotplot import dotplot
from jcvi.utils.cbook import gene_name, human_size
from jcvi.utils.natsort import natsorted
# Map orientations to ints
FF = {"+": 1, "-": -1, "?": 1}
RR = {"+": -1, "-": 1, "?": -1}
LB = 18 # Lower bound for golden_array()
UB = 29 # Upper bound for golden_array()
BB = UB - LB + 1 # Span for golden_array()
ACCEPT = green("ACCEPT")
REJECT = red("REJECT")
BINSIZE = 50000
class ContigOrderingLine(object):
"""Stores one line in the ContigOrdering file
"""
def __init__(self, line, sep="|"):
args = line.split()
self.contig_id = args[0]
self.contig_name = args[1].split(sep)[0]
contig_rc = args[2]
assert contig_rc in ("0", "1")
self.strand = "+" if contig_rc == "0" else "-"
self.orientation_score = args[3]
self.gap_size_after_contig = args[4]
class ContigOrdering(LineFile):
"""ContigOrdering file as created by LACHESIS, one per chromosome group.
Header contains summary information per group, followed by list of contigs
with given ordering.
"""
def __init__(self, filename):
super(ContigOrdering, self).__init__(filename)
fp = open(filename)
for row in fp:
if row[0] == "#":
continue
orderline = ContigOrderingLine(row)
self.append(orderline)
def write_agp(
self, obj, sizes, fw=sys.stdout, gapsize=100, gaptype="contig", evidence="map"
):
"""Converts the ContigOrdering file into AGP format
"""
contigorder = [(x.contig_name, x.strand) for x in self]
order_to_agp(
obj,
contigorder,
sizes,
fw,
gapsize=gapsize,
gaptype=gaptype,
evidence=evidence,
)
class CLMFile:
"""CLM file (modified) has the following format:
tig00046211+ tig00063795+ 1 53173
tig00046211+ tig00063795- 1 116050
tig00046211- tig00063795+ 1 71155
tig00046211- tig00063795- 1 134032
tig00030676+ tig00077819+ 5 136407 87625 87625 106905 102218
tig00030676+ tig00077819- 5 126178 152952 152952 35680 118923
tig00030676- tig00077819+ 5 118651 91877 91877 209149 125906
tig00030676- tig00077819- 5 108422 157204 157204 137924 142611
"""
def __init__(self, clmfile, skiprecover=False):
self.name = op.basename(clmfile).rsplit(".", 1)[0]
self.clmfile = clmfile
self.idsfile = clmfile.rsplit(".", 1)[0] + ".ids"
self.parse_ids(skiprecover)
self.parse_clm()
self.signs = None
def parse_ids(self, skiprecover):
"""IDS file has a list of contigs that need to be ordered. 'recover',
keyword, if available in the third column, is less confident.
tig00015093 46912
tig00035238 46779 recover
tig00030900 119291
"""
idsfile = self.idsfile
logging.debug("Parse idsfile `{}`".format(idsfile))
fp = open(idsfile)
tigs = []
for row in fp:
if row[0] == "#": # Header
continue
atoms = row.split()
tig, _, size = atoms
size = int(size)
if skiprecover and len(atoms) == 3 and atoms[2] == "recover":
continue
tigs.append((tig, size))
# Arrange contig names and sizes
_tigs, _sizes = zip(*tigs)
self.contigs = set(_tigs)
self.sizes = np.array(_sizes)
self.tig_to_size = dict(tigs)
# Initially all contigs are considered active
self.active = set(_tigs)
def parse_clm(self):
clmfile = self.clmfile
logging.debug("Parse clmfile `{}`".format(clmfile))
fp = open(clmfile)
contacts = {}
contacts_oriented = defaultdict(dict)
orientations = defaultdict(list)
for row in fp:
atoms = row.strip().split("\t")
assert len(atoms) == 3, "Malformed line `{}`".format(atoms)
abtig, links, dists = atoms
atig, btig = abtig.split()
at, ao = atig[:-1], atig[-1]
bt, bo = btig[:-1], btig[-1]
if at not in self.tig_to_size:
continue
if bt not in self.tig_to_size:
continue
dists = [int(x) for x in dists.split()]
contacts[(at, bt)] = len(dists)
gdists = golden_array(dists)
contacts_oriented[(at, bt)][(FF[ao], FF[bo])] = gdists
contacts_oriented[(bt, at)][(RR[bo], RR[ao])] = gdists
strandedness = 1 if ao == bo else -1
orientations[(at, bt)].append((strandedness, dists))
self.contacts = contacts
self.contacts_oriented = contacts_oriented
# Preprocess the orientations dict
for (at, bt), dists in orientations.items():
dists = [(s, d, hmean_int(d)) for (s, d) in dists]
strandedness, md, mh = min(dists, key=lambda x: x[-1])
orientations[(at, bt)] = (strandedness, len(md), mh)
self.orientations = orientations
def calculate_densities(self):
"""
        Calculate the density of inter-contig links per base. Strong contigs are
        considered to have a high level of inter-contig links in the current
        partition.
"""
active = self.active
densities = defaultdict(int)
for (at, bt), links in self.contacts.items():
if not (at in active and bt in active):
continue
densities[at] += links
densities[bt] += links
logdensities = {}
for x, d in densities.items():
s = self.tig_to_size[x]
logd = np.log10(d * 1.0 / min(s, 500000))
logdensities[x] = logd
return logdensities
def report_active(self):
logging.debug(
"Active contigs: {} (length={})".format(self.N, self.active_sizes.sum())
)
def activate(self, tourfile=None, minsize=10000, backuptour=True):
"""
Select contigs in the current partition. This is the setup phase of the
algorithm, and supports two modes:
- "de novo": This is useful at the start of a new run where no tours
available. We select the strong contigs that have significant number
of links to other contigs in the partition. We build a histogram of
link density (# links per bp) and remove the contigs that appear as
outliers. The orientations are derived from the matrix decomposition
of the pairwise strandedness matrix O.
- "hotstart": This is useful when there was a past run, with a given
tourfile. In this case, the active contig list and orientations are
derived from the last tour in the file.
"""
if tourfile and (not op.exists(tourfile)):
logging.debug("Tourfile `{}` not found".format(tourfile))
tourfile = None
if tourfile:
logging.debug("Importing tourfile `{}`".format(tourfile))
tour, tour_o = iter_last_tour(tourfile, self)
self.active = set(tour)
tig_to_idx = self.tig_to_idx
tour = [tig_to_idx[x] for x in tour]
signs = sorted([(x, FF[o]) for (x, o) in zip(tour, tour_o)])
_, signs = zip(*signs)
self.signs = np.array(signs, dtype=int)
if backuptour:
backup(tourfile)
tour = array.array("i", tour)
else:
self.report_active()
while True:
logdensities = self.calculate_densities()
lb, ub = outlier_cutoff(list(logdensities.values()))
logging.debug("Log10(link_densities) ~ [{}, {}]".format(lb, ub))
remove = set(
x
for x, d in logdensities.items()
if (d < lb and self.tig_to_size[x] < minsize * 10)
)
if remove:
self.active -= remove
self.report_active()
else:
break
logging.debug("Remove contigs with size < {}".format(minsize))
self.active = set(x for x in self.active if self.tig_to_size[x] >= minsize)
tour = range(self.N) # Use starting (random) order otherwise
tour = array.array("i", tour)
# Determine orientations
self.flip_all(tour)
self.report_active()
self.tour = tour
return tour
def evaluate_tour_M(self, tour):
""" Use Cythonized version to evaluate the score of a current tour
"""
from .chic import score_evaluate_M
return score_evaluate_M(tour, self.active_sizes, self.M)
def evaluate_tour_P(self, tour):
""" Use Cythonized version to evaluate the score of a current tour,
with better precision on the distance of the contigs.
"""
from .chic import score_evaluate_P
return score_evaluate_P(tour, self.active_sizes, self.P)
def evaluate_tour_Q(self, tour):
""" Use Cythonized version to evaluate the score of a current tour,
taking orientation into consideration. This may be the most accurate
evaluation under the right condition.
"""
from .chic import score_evaluate_Q
return score_evaluate_Q(tour, self.active_sizes, self.Q)
def flip_log(self, method, score, score_flipped, tag):
logging.debug("{}: {} => {} {}".format(method, score, score_flipped, tag))
def flip_all(self, tour):
""" Initialize the orientations based on pairwise O matrix.
"""
if self.signs is None: # First run
score = 0
else:
old_signs = self.signs[: self.N]
(score,) = self.evaluate_tour_Q(tour)
# Remember we cannot have ambiguous orientation code (0 or '?') here
self.signs = get_signs(self.O, validate=False, ambiguous=False)
(score_flipped,) = self.evaluate_tour_Q(tour)
if score_flipped >= score:
tag = ACCEPT
else:
self.signs = old_signs[:]
tag = REJECT
self.flip_log("FLIPALL", score, score_flipped, tag)
return tag
def flip_whole(self, tour):
""" Test flipping all contigs at the same time to see if score improves.
"""
(score,) = self.evaluate_tour_Q(tour)
self.signs = -self.signs
(score_flipped,) = self.evaluate_tour_Q(tour)
if score_flipped > score:
tag = ACCEPT
else:
self.signs = -self.signs
tag = REJECT
self.flip_log("FLIPWHOLE", score, score_flipped, tag)
return tag
def flip_one(self, tour):
""" Test flipping every single contig sequentially to see if score
improves.
"""
n_accepts = n_rejects = 0
any_tag_ACCEPT = False
for i, t in enumerate(tour):
if i == 0:
(score,) = self.evaluate_tour_Q(tour)
self.signs[t] = -self.signs[t]
(score_flipped,) = self.evaluate_tour_Q(tour)
if score_flipped > score:
n_accepts += 1
tag = ACCEPT
else:
self.signs[t] = -self.signs[t]
n_rejects += 1
tag = REJECT
self.flip_log(
"FLIPONE ({}/{})".format(i + 1, len(self.signs)),
score,
score_flipped,
tag,
)
if tag == ACCEPT:
any_tag_ACCEPT = True
score = score_flipped
logging.debug("FLIPONE: N_accepts={} N_rejects={}".format(n_accepts, n_rejects))
return ACCEPT if any_tag_ACCEPT else REJECT
def prune_tour(self, tour, cpus):
""" Test deleting each contig and check the delta_score; tour here must
be an array of ints.
"""
while True:
(tour_score,) = self.evaluate_tour_M(tour)
logging.debug("Starting score: {}".format(tour_score))
active_sizes = self.active_sizes
M = self.M
args = []
for i, t in enumerate(tour):
stour = tour[:i] + tour[i + 1 :]
args.append((t, stour, tour_score, active_sizes, M))
# Parallel run
p = Pool(processes=cpus)
results = list(p.imap(prune_tour_worker, args))
assert len(tour) == len(
results
), "Array size mismatch, tour({}) != results({})".format(
len(tour), len(results)
)
# Identify outliers
active_contigs = self.active_contigs
idx, log10deltas = zip(*results)
lb, ub = outlier_cutoff(log10deltas)
logging.debug("Log10(delta_score) ~ [{}, {}]".format(lb, ub))
remove = set(active_contigs[x] for (x, d) in results if d < lb)
self.active -= remove
self.report_active()
tig_to_idx = self.tig_to_idx
tour = [active_contigs[x] for x in tour]
tour = array.array("i", [tig_to_idx[x] for x in tour if x not in remove])
if not remove:
break
self.tour = tour
self.flip_all(tour)
return tour
@property
def active_contigs(self):
return list(self.active)
@property
def active_sizes(self):
return np.array([self.tig_to_size[x] for x in self.active])
@property
def N(self):
return len(self.active)
@property
def oo(self):
return range(self.N)
@property
def tig_to_idx(self):
return dict((x, i) for (i, x) in enumerate(self.active))
@property
def M(self):
"""
        Contact frequency matrix. Each cell contains the number of inter-contig
        links between the i-th and j-th contigs.
"""
N = self.N
tig_to_idx = self.tig_to_idx
M = np.zeros((N, N), dtype=int)
for (at, bt), links in self.contacts.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
M[ai, bi] = M[bi, ai] = links
return M
@property
def O(self):
"""
        Pairwise strandedness matrix. Each cell contains +1 if the i-th and j-th
        contigs have the same orientation, or -1 if they have opposite orientations.
"""
N = self.N
tig_to_idx = self.tig_to_idx
O = np.zeros((N, N), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
score = strandedness * md
O[ai, bi] = O[bi, ai] = score
return O
@property
def P(self):
"""
Contact frequency matrix with better precision on distance between
contigs. In the matrix M, the distance is assumed to be the distance
        between mid-points of two contigs. In matrix P, however, we compute the
harmonic mean of the links for the orientation configuration that is
shortest. This offers better precision for the distance between big
contigs.
"""
N = self.N
tig_to_idx = self.tig_to_idx
P = np.zeros((N, N, 2), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
P[ai, bi, 0] = P[bi, ai, 0] = md
P[ai, bi, 1] = P[bi, ai, 1] = mh
return P
@property
def Q(self):
"""
        Contact frequency matrix when contigs are already oriented. This is a
        similar matrix to M, but rather than having the number of links in the
cell, it points to an array that has the actual distances.
"""
N = self.N
tig_to_idx = self.tig_to_idx
signs = self.signs
Q = np.ones((N, N, BB), dtype=int) * -1 # Use -1 as the sentinel
for (at, bt), k in self.contacts_oriented.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
ao = signs[ai]
bo = signs[bi]
Q[ai, bi] = k[(ao, bo)]
return Q
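# --- Editorial usage sketch (added for illustration; not part of the original file) ---
# The file names below are hypothetical, and the flow assumes the companion .ids
# file sits next to the .clm file and that the Cython "chic" module is compiled.
# Typical usage: load the CLM file, activate contigs for the current partition,
# then score and prune candidate tours.
def _demo_clmfile_usage(cpus=4):
    clm = CLMFile("test.clm")          # also expects "test.ids" alongside it
    tour = clm.activate(tourfile=None, minsize=10000)
    (score,) = clm.evaluate_tour_M(tour)
    tour = clm.prune_tour(tour, cpus)
    return tour, score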
def hmean_int(a, a_min=5778, a_max=1149851):
""" Harmonic mean of an array, returns the closest int
"""
from scipy.stats import hmean
return int(round(hmean(np.clip(a, a_min, a_max))))
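# --- Editorial example (added for illustration; not part of the original file) ---
# A quick sketch of how hmean_int behaves: the clip bounds saturate extreme link
# distances before the harmonic mean is taken, so one very large distance cannot
# dominate the estimate.
def _demo_hmean_int():
    dists = [1200, 53000, 2500000]
    clipped = hmean_int(dists)                       # 2500000 saturates at a_max
    unclipped = hmean_int(dists, a_min=1, a_max=10 ** 9)
    print("clipped:", clipped, "unclipped:", unclipped)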
def golden_array(a, phi=1.61803398875, lb=LB, ub=UB):
""" Given list of ints, we aggregate similar values so that it becomes an
array of multiples of phi, where phi is the golden ratio.
phi ^ 14 = 843
phi ^ 33 = 7881196
So the array of counts go between 843 to 788196. One triva is that the
exponents of phi gets closer to integers as N grows. See interesting
discussion here:
<https://www.johndcook.com/blog/2017/03/22/golden-powers-are-nearly-integers/>
"""
counts = np.zeros(BB, dtype=int)
for x in a:
c = int(round(math.log(x, phi)))
if c < lb:
c = lb
if c > ub:
c = ub
counts[c - lb] += 1
return counts
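# --- Editorial example (added for illustration; not part of the original file) ---
# golden_array buckets each distance by its nearest integer power of phi, so
# similar distances share a bin. LB, UB and BB are assumed to be module-level
# constants defined earlier in this file (lower/upper exponents and bin count).
def _demo_golden_array():
    counts = golden_array([900, 950, 50000, 3000000])
    # 900 and 950 round to the same exponent of phi, so they land in the same bin
    print(counts.sum(), "distances distributed over", len(counts), "bins")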
def prune_tour_worker(arg):
""" Worker thread for CLMFile.prune_tour()
"""
from .chic import score_evaluate_M
t, stour, tour_score, active_sizes, M = arg
(stour_score,) = score_evaluate_M(stour, active_sizes, M)
delta_score = tour_score - stour_score
log10d = np.log10(delta_score) if delta_score > 1e-9 else -9
return t, log10d
def main():
actions = (
# LACHESIS output processing
("agp", "generate AGP file based on LACHESIS output"),
("score", "score the current LACHESIS CLM"),
# Simulation
("simulate", "simulate CLM data"),
# Scaffolding
("optimize", "optimize the contig order and orientation"),
("density", "estimate link density of contigs"),
# Plotting
("movieframe", "plot heatmap and synteny for a particular tour"),
("movie", "plot heatmap optimization history in a tourfile"),
# Reference-based analytics
("bam2mat", "convert bam file to .npy format used in plotting"),
("mergemat", "combine counts from multiple .npy data files"),
("heatmap", "plot heatmap based on .npy file"),
("dist", "plot distance distribution based on .dist.npy file"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def fit_power_law(xs, ys):
""" Fit power law distribution.
See reference:
http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html
    Assumes the form Y = A * X^B.
Args:
xs ([int]): X vector
ys ([float64]): Y vector
Returns:
(A, B), the coefficients
"""
import math
sum_logXlogY, sum_logXlogX, sum_logX, sum_logY = 0, 0, 0, 0
N = len(xs)
for i in range(N):
if not xs[i] or not ys[i]:
continue
logXs, logYs = math.log(xs[i]), math.log(ys[i])
sum_logXlogY += logXs * logYs
sum_logXlogX += logXs * logXs
sum_logX += logXs
sum_logY += logYs
B = (N * sum_logXlogY - sum_logX * sum_logY) / (
N * sum_logXlogX - sum_logX * sum_logX
)
A = math.exp((sum_logY - B * sum_logX) / N)
logging.debug("Power law Y = {:.1f} * X ^ {:.4f}".format(A, B))
label = "$Y={:.1f} \\times X^{{ {:.4f} }}$".format(A, B)
return A, B, label
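# --- Editorial example (added for illustration; not part of the original file) ---
# Sanity check for fit_power_law on synthetic data drawn from Y = 2.5 * X^0.8;
# the recovered coefficients should come out close to A = 2.5 and B = 0.8.
def _demo_fit_power_law():
    xs = list(range(1, 50))
    ys = [2.5 * x ** 0.8 for x in xs]
    A, B, label = fit_power_law(xs, ys)
    print(A, B, label)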
def dist(args):
"""
%prog dist input.dist.npy genome.json
Plot histogram based on .dist.npy data file. The .npy file stores an array
with link counts per dist bin, with the bin starts stored in the genome.json.
"""
import seaborn as sns
import pandas as pd
from jcvi.graphics.base import human_base_formatter, markup
p = OptionParser(dist.__doc__)
p.add_option("--title", help="Title of the histogram")
p.add_option("--xmin", default=300, help="Minimum distance")
p.add_option("--xmax", default=6000000, help="Maximum distance")
opts, args, iopts = p.set_image_options(args, figsize="6x6")
if len(args) != 2:
sys.exit(not p.print_help())
npyfile, jsonfile = args
pf = npyfile.rsplit(".", 1)[0]
header = json.loads(open(jsonfile).read())
distbin_starts = np.array(header["distbinstarts"], dtype="float64")
distbin_sizes = np.array(header["distbinsizes"], dtype="float64")
a = np.load(npyfile)
xmin, xmax = opts.xmin, opts.xmax
(size,) = min(distbin_sizes.shape, distbin_starts.shape, a.shape)
df = pd.DataFrame()
xstart, xend = (
np.searchsorted(distbin_starts, xmin),
        np.searchsorted(distbin_starts, xmax),
    )
#-- -- -- -- Intermediate Python
# Used for Data Scientist Training Path
#FYI it's a compilation of how to work
#with different commands.
####### -----> Matplotlib
### --------------------------------------------------------
## Line plot - ex#0
# Print the last item from year and pop
print(year[-1])
print(pop[-1])
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Make a line plot: year on the x-axis, pop on the y-axis
plt.plot(year, pop)
# Display the plot with plt.show()
plt.show()
### --------------------------------------------------------
## Line plot - ex#1
import matplotlib.pyplot as plt
# Print the last item of gdp_cap and life_exp
print(gdp_cap[-1])
print(life_exp[-1])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
plt.plot(gdp_cap, life_exp)
# Display the plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex0
import matplotlib.pyplot as plt
# Change the line plot below to a scatter plot
plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale
plt.xscale('log')
# Show plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex1
# Import package
import matplotlib.pyplot as plt
# Build Scatter plot
plt.scatter(pop, life_exp)
# Show plot
plt.show()
## HISTOGRAMS
### --------------------------------------------------------
### -> Build a histogram
import matplotlib.pyplot as plt
# Create histogram of life_exp data
plt.hist(life_exp)
# Display histogram
plt.show()
### --------------------------------------------------------
## Build a histogram --- bins
import matplotlib.pyplot as plt
# Build histogram with 5 bins
plt.hist(life_exp, bins=5)
# Show and clean up plot
plt.show()
plt.clf()
# Build histogram with 20 bins
plt.hist(life_exp, bins=20)
# Show and clean up again
plt.show()
plt.clf()
### --------------------------------------------------------
## Build a histogram --- compare
import matplotlib.pyplot as plt
# Histogram of life_exp, 15 bins
plt.hist(life_exp, bins=15)
# Show and clear plot
plt.show()
plt.clf()
# Histogram of life_exp1950, 15 bins
plt.hist(life_exp1950, bins=15)
# Show and clear plot again
plt.show()
plt.clf()
### --------------------------------------------------------
# You're a professor teaching Data Science with Python,
# and you want to visually assess if the grades on
# your exam follow a particular distribution.
# Which plot do you use?
# R/ Histogram
### --------------------------------------------------------
# You're a professor in Data Analytics with Python, and you
# want to visually assess if longer answers on exam
# questions lead to higher grades.
# Which plot do you use?
# Scatter plot
### --------------------------------------------------------
### Labels
import matplotlib.pyplot as plt
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
## Ticks
import matplotlib.pyplot as plt
# Scatter plot
plt.scatter(gdp_cap, life_exp)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Definition of tick_val and tick_lab
tick_val = [1000, 10000, 100000]
tick_lab = ['1k', '10k', '100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
#Sizes
# Import numpy as np
import numpy as np
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop
np_pop = np_pop*2
# Update: set s argument to np_pop
plt.scatter(gdp_cap, life_exp, s = np_pop)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000, 10000, 100000],['1k', '10k', '100k'])
# Display the plot
plt.show()
### --------------------------------------------------------
### Colors
import matplotlib.pyplot as plt
# Specify c and alpha inside plt.scatter()
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Show the plot
plt.show()
### --------------------------------------------------------
## Additional Customizations
# Scatter plot
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Additional customizations
plt.text(1550, 71, 'India')
plt.text(5700, 80, 'China')
# Add grid() call
plt.grid(True)
# Show the plot
plt.show()
### --------------------------------------------------------
#### INTERPRETATION
# If you have a look at your colorful plot,
# it's clear that people live longer in countries with a
# higher GDP per capita. No high income countries have really
# short life expectancy, and no low income countries
# have very long life expectancy. Still, there is a huge
# difference in life expectancy between countries on the same
# income level. Most people live in middle income countries
# where difference in lifespan is huge between countries;
# depending on how income is distributed and how it is used.
# What can you say about the plot?
## R/ The countries in blue, corresponding to Africa, have
# both low life expectancy and a low GDP per capita.
### Dictionaries, Part 1
### --------------------------------------------------------
### --->Motivation for dictionaries
# Definition of countries and capital
countries = ['spain', 'france', 'germany', 'norway']
capitals = ['madrid', 'paris', 'berlin', 'oslo']
# Get index of 'germany': ind_ger
ind_ger = countries.index('germany')
# Use ind_ger to print out capital of Germany
print(capitals[ind_ger])
### --------------------------------------------------------
## Create dictionary
# Definition of countries and capital
countries = ['spain', 'france', 'germany', 'norway']
capitals = ['madrid', 'paris', 'berlin', 'oslo']
# From string in countries and capitals, create dictionary europe
europe = {
'spain':'madrid',
"france":"paris",
"germany":"berlin",
"norway":"oslo"}
# Print europe
print(europe)
### --------------------------------------------------------
## Access dictionary
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Print out the keys in europe
print(europe.keys())
# Print out value that belongs to key 'norway'
print(europe['norway'])
### Dictionaries, Part 2
### --------------------------------------------------------
### ---> Dictionary Manipulation - ex 0
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Add italy to europe
europe["italy"] = 'rome'
# Print out italy in europe
print('italy' in europe)
# Add poland to europe
europe["poland"] = 'warsaw'
# Print europe
print(europe)
### --------------------------------------------------------
### ---> Dictionary Manipulation - ex 1
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'bonn',
'norway':'oslo', 'italy':'rome', 'poland':'warsaw',
'australia':'vienna' }
# Update capital of germany
europe['germany'] = 'berlin'
# Remove australia
del(europe['australia'])
# Print europe
print(europe)
### --------------------------------------------------------
## Dictionariception
# Dictionary of dictionaries
europe = { 'spain': { 'capital':'madrid', 'population':46.77 },
'france': { 'capital':'paris', 'population':66.03 },
'germany': { 'capital':'berlin', 'population':80.62 },
'norway': { 'capital':'oslo', 'population':5.084 } }
# Print out the capital of France
print(europe['france']['capital'])
# Create sub-dictionary data
data = {'capital': 'rome', 'population': 59.83}
# Add data to europe under key 'italy'
europe['italy'] = data
# Print europe
print(europe)
### Pandas, Part 1
### --------------------------------------------------------
#### ---->>> Dictionary to DataFrame -- ex#0
# Pre-defined lists
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
# Import pandas as pd
import pandas as pd
# Create dictionary my_dict with three key:value pairs: my_dict
my_dict = {'country': names, 'drives_right': dr, 'cars_per_cap': cpc}
# Build a DataFrame cars from my_dict: cars
cars = pd.DataFrame(my_dict)
# Print cars
print(cars)
### --------------------------------------------------------
#### ---->>> Dictionary to DataFrame -- ex#1
import pandas as pd
# Build cars DataFrame
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
cars_dict = { 'country':names, 'drives_right':dr, 'cars_per_cap':cpc }
cars = pd.DataFrame(cars_dict)
print(cars)
# Definition of row_labels
row_labels = ['US', 'AUS', 'JPN', 'IN', 'RU', 'MOR', 'EG']
# Specify row labels of cars
cars.index = row_labels
# Print cars again
print(cars)
### --------------------------------------------------------
### CSV to DataFrame --- ex#0
# Import pandas as pd
import pandas as pd
# Import the cars.csv data: cars
cars = pd.read_csv('cars.csv')
# Print out cars
print(cars)
### --------------------------------------------------------
### CSV to DataFrame --- ex#1
# Import pandas as pd
import pandas as pd
# Fix import by including index_col
cars = pd.read_csv('cars.csv', index_col=0)
# Print out cars
print(cars)
### Pandas, Part 2
### --------------------------------------------------------
### ----> Square Brackets -- ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out country column as Pandas Series
print(cars['country'])
# Print out country column as Pandas DataFrame
print(cars[['country']])
# Print out DataFrame with country and drives_right columns
print(cars[['country', 'drives_right']])
### --------------------------------------------------------
### ----> Square Brackets -- ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out first 3 observations
print(cars[0:3])
# Print out fourth, fifth and sixth observation
print(cars[3:6])
### --------------------------------------------------------
### ---> loc and iloc -- ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out observation for Japan
print(cars.loc['JPN'])
# Print out observations for Australia and Egypt
print(cars.loc[['AUS', 'EG']])
### --------------------------------------------------------
### ---> loc and iloc -- ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out drives_right value of Morocco
print(cars.loc['MOR', 'drives_right'])
# Print sub-DataFrame
print(cars.loc[['RU', 'MOR'], ['country', 'drives_right']])
### --------------------------------------------------------
### ---> loc and iloc -- ex#2
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out drives_right column as Series
print(cars.loc[:, 'drives_right'])
# Print out drives_right column as DataFrame
print(cars.loc[:, ['drives_right']])
# Print out cars_per_cap and drives_right as DataFrame
print(cars.loc[:, ['cars_per_cap', 'drives_right']])
## Comparison Operators
### --------------------------------------------------------
### ---> Equality
# Comparison of booleans
True != False
# Comparison of integers
-5 * 15 != 75
# Comparison of strings
"pyscript" != "PyScript"
# Compare a boolean with an integer
True == 1
### --------------------------------------------------------
### ---> Greater and less than
# Comparison of integers
x = -3 * 6
print(x >= -10)
# Comparison of strings
y = "test"
print('test' <= y)
# Comparison of booleans
print(True > False)
### --------------------------------------------------------
### ---> Compare arrays
# Create arrays
import numpy as np
my_house = np.array([18.0, 20.0, 10.75, 9.50])
your_house = np.array([14.0, 24.0, 14.25, 9.0])
# my_house greater than or equal to 18
print(my_house >= 18)
# my_house less than your_house
print(my_house < your_house)
#---- Boolean Operators
### --------------------------------------------------------
## and, or, not -- ex#0
# Define variables
my_kitchen = 18.0
your_kitchen = 14.0
# my_kitchen bigger than 10 and smaller than 18?
print(my_kitchen > 10 and my_kitchen < 18)
# my_kitchen smaller than 14 or bigger than 17?
print(my_kitchen < 14 or my_kitchen > 17)
# Double my_kitchen smaller than triple your_kitchen?
print((my_kitchen * 2) < (your_kitchen * 3))
### --------------------------------------------------------
## and, or, not -- ex#1
# To see if you completely understood the boolean
# operators, have a look at the following piece of Python code:
# x = 8
# y = 9
# not(not(x < 3) and not(y > 14 or y > 10))
# What will the result be if you execute these three
# commands in the IPython Shell?
# NB: Notice that not has a higher priority
# than and and or, it is executed first.
# R/ False
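# Quick check of the quiz above (added for illustration):
x = 8
y = 9
print(not(not(x < 3) and not(y > 14 or y > 10)))    # prints False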
### --------------------------------------------------------
## Boolean operators with Numpy
# Create arrays
import numpy as np
my_house = np.array([18.0, 20.0, 10.75, 9.50])
your_house = np.array([14.0, 24.0, 14.25, 9.0])
# my_house greater than 18.5 or smaller than 10
print(np.logical_or(my_house > 18.5,
my_house < 10))
# Both my_house and your_house smaller than 11
print(np.logical_and(my_house < 11,
your_house < 11))
### --------------------------------------------------------
## ----> if, elif, else
### Warmup
# To experiment with if and else a bit, have a look at this code sample:
# area = 10.0
# if(area < 9) :
# print("small")
# elif(area < 12) :
# print("medium")
# else :
# print("large")
# What will the output be if you run this piece of code in the IPython Shell?
# R/ medium
### --------------------------------------------------------
## if example
# Define variables
room = "kit"
area = 14.0
# if statement for room
if room == "kit" :
print("looking around in the kitchen.")
# if statement for area
if area > 15:
print("big place!")
### --------------------------------------------------------
## Add else
# Define variables
room = "kit"
area = 14.0
# if-else construct for room
if room == "kit" :
print("looking around in the kitchen.")
else :
print("looking around elsewhere.")
# if-else construct for area
if area > 15 :
print("big place!")
else:
print("pretty small.")
### --------------------------------------------------------
## Customize further: elif
# Define variables
room = "bed"
area = 14.0
# if-elif-else construct for room
if room == "kit" :
print("looking around in the kitchen.")
elif room == "bed":
print("looking around in the bedroom.")
else :
print("looking around elsewhere.")
# if-elif-else construct for area
if area > 15 :
print("big place!")
elif area > 10:
print("medium size, nice!")
else :
print("pretty small.")
## Filtering pandas DataFrames
### --------------------------------------------------------
### ---> Driving right - ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Extract drives_right column as Series: dr
dr = cars['drives_right']
# Use dr to subset cars: sel
sel = cars[dr]
# Print sel
print(sel)
### --------------------------------------------------------
### ---> Driving right - ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Convert code to a one-liner
sel = cars[cars['drives_right']]
# Print sel
print(sel)
### --------------------------------------------------------
### ---> Cars per capita # ex0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Create car_maniac: observations that have a cars_per_cap over 500
cpc = cars['cars_per_cap']
many_cars = cpc > 500
car_maniac = cars[many_cars]
# Print car_maniac
print(car_maniac)
### --------------------------------------------------------
### ---> Cars per capita # ex1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Import numpy, you'll need this
import numpy as np
# Create medium: observations with cars_per_cap between 100 and 500
cpc = cars['cars_per_cap']
between = np.logical_and(cpc > 100, cpc < 500)
medium = cars[between]
# Print medium
print(medium)
## while loop
### --------------------------------------------------------
## ----> while: warming up
# The while loop is like a repeated if statement.
# The code is executed over and over again, as long as
# the condition is True. Have another look at its recipe.
# while condition :
# expression
# Can you tell how many printouts the following while loop will do?
# x = 1
# while x < 4 :
# print(x)
# x = x + 1
## R/ 3
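# Quick check of the quiz above (added for illustration):
x = 1
while x < 4 :
    print(x)        # prints 1, 2 and 3 -> three printouts
    x = x + 1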
### --------------------------------------------------------
## Basic while loop
# Initialize offset
offset = 8
# Code the while loop
while offset != 0:
print("correcting...")
offset = offset - 1
print(offset)
### --------------------------------------------------------
## Add conditionals
# Initialize offset
offset = -6
# Code the while loop
while offset != 0 :
print("correcting...")
if offset > 0:
offset = offset - 1
else:
offset = offset + 1
print(offset)
## for loop
### --------------------------------------------------------
# -- Loop over a list
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Code the for loop
for a in areas:
print(a)
### --------------------------------------------------------
# Indexes and values - ex#0
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Change for loop to use enumerate()
for index, a in enumerate(areas):
print("room " + str(index) + ": " + str(a))
### --------------------------------------------------------
# Indexes and values - ex#1
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Code the for loop
for index, area in enumerate(areas) :
print("room " + str(index+1) + ": " + str(area))
### --------------------------------------------------------
## Loop over list of lists
# house list of lists
house = [["hallway", 11.25],
["kitchen", 18.0],
["living room", 20.0],
["bedroom", 10.75],
["bathroom", 9.50]]
# Build a for loop from scratch
for room, area in house :
print("the " + str(room) + " is " + str(area) + " sqm")
### Loop Data Structures Part 1
### --------------------------------------------------------
## ---> Loop over dictionary
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin',
'norway':'oslo', 'italy':'rome', 'poland':'warsaw', 'austria':'vienna' }
# Iterate over europe
for k, v in europe.items():
print("the capital of " + str(k) + " is " + str(v))
### --------------------------------------------------------
## ---> Loop over Numpy array
# Import numpy as np
import numpy as np
# For loop over np_height
for x in np_height:
print(str(x) + " inches")
# For loop over np_baseball
for x in np.nditer(np_baseball):
print(x)
#### Loop Data Structures Part 2
### --------------------------------------------------------
###------> Loop over DataFrame -- ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Iterate over rows of cars
for lab, row in cars.iterrows():
print(lab)
print(row)
### --------------------------------------------------------
###------> Loop over DataFrame -- ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Adapt for loop
for lab, row in cars.iterrows() :
print(str(lab) + ": " + str(row["cars_per_cap"]))
### --------------------------------------------------------
## Add column - ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Code for loop that adds COUNTRY column
for lab, row in cars.iterrows():
cars.loc[lab, 'COUNTRY'] = row['country'].upper()
# Print cars
print(cars)
### --------------------------------------------------------
## Add column - ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Use .apply(str.upper)
cars['COUNTRY'] = cars['country'].apply(str.upper)
### Random Numbers
### --------------------------------------------------------
### ---> Random float
# Import numpy as np
import numpy as np
# Set the seed
np.random.seed(123)
# Generate and print random float
print(np.random.rand())
### --------------------------------------------------------
### ----> Roll the dice
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Use randint() to simulate a dice
print(np.random.randint(1, 7))
# Use randint() again
print(np.random.randint(1, 7))
### --------------------------------------------------------
### ---->Determine your next move
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Starting step
step = 50
# Roll the dice
dice = np.random.randint(1, 7)
# Finish the control construct
if dice <= 2:
step = step - 1
elif dice < 6:
step = step + 1
else:
step = step + np.random.randint(1,7)
# Print out dice and step
print(dice)
print(step)
### Random Walk
### --------------------------------------------------------
## ---> The next step
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Initialize random_walk
random_walk = [0]
# Complete the loop
for x in range(100):
# Set step: last element in random_walk
step = random_walk[-1]
# Roll the dice
dice = np.random.randint(1,7)
# Determine next step
if dice <= 2:
step = step - 1
elif dice <= 5:
step = step + 1
else:
step = step + np.random.randint(1,7)
# append next_step to random_walk
random_walk.append(step)
# Print random_walk
print(random_walk)
### --------------------------------------------------------
### ---> How low can you go?
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Initialize random_walk
random_walk = [0]
for x in range(100) :
step = random_walk[-1]
dice = np.random.randint(1,7)
if dice <= 2:
# Replace below: use max to make sure step can't go below 0
step = max(0, step - 1)
elif dice <= 5:
step = step + 1
else:
step = step + np.random.randint(1,7)
random_walk.append(step)
print(random_walk)
### --------------------------------------------------------
### ---> Visualize the walk
# Initialization
import numpy as np
np.random.seed(123)
random_walk = [0]
for x in range(100) :
step = random_walk[-1]
    dice = np.random.randint(1,7)
    if dice <= 2:
        step = max(0, step - 1)
    elif dice <= 5:
        step = step + 1
    else:
        step = step + np.random.randint(1,7)
    random_walk.append(step)

# Import matplotlib.pyplot as plt, plot random_walk, then display the plot
import matplotlib.pyplot as plt
plt.plot(random_walk)
plt.show()
'''
Created on 28.04.2016
@author: lemmerfn
'''
from abc import ABC, abstractmethod
import weakref
from functools import total_ordering
import pandas as pd
import pysubgroup as ps
from itertools import chain
import copy
import numpy as np
@total_ordering
class SelectorBase(ABC):
__refs__ = weakref.WeakSet()
def __new__(cls, *args, **kwargs):
tmp = super().__new__(cls)
tmp.set_descriptions(*args, **kwargs)
if tmp in SelectorBase.__refs__:
            for ref in SelectorBase.__refs__:
if ref == tmp:
return ref
return tmp
def __init__(self):
SelectorBase.__refs__.add(self)
def __eq__(self, other):
if other is None:
return False
return repr(self) == repr(other)
def __lt__(self, other):
return repr(self) < repr(other)
def __hash__(self):
return self._hash #pylint: disable=no-member
@abstractmethod
def set_descriptions(self, *args, **kwargs):
pass
def get_cover_array_and_size(subgroup, data_len=None, data=None):
if hasattr(subgroup, "representation"):
cover_arr = subgroup
size = subgroup.size_sg
elif isinstance(subgroup, slice):
cover_arr = subgroup
if data_len is None:
if isinstance(data, pd.DataFrame):
data_len = len(data)
else:
raise ValueError("if you pass a slice, you need to pass either data_len or data")
# https://stackoverflow.com/questions/36188429/retrieve-length-of-slice-from-slice-object-in-python
size = len(range(*subgroup.indices(data_len)))
elif hasattr(subgroup, '__array_interface__'):
cover_arr = subgroup
type_char = subgroup.__array_interface__['typestr'][1]
if type_char == 'b': # boolean indexing is used
size = np.count_nonzero(cover_arr)
elif type_char == 'u' or type_char == 'i': # integer indexing
size = subgroup.__array_interface__['shape'][0]
else:
print(type_char)
raise NotImplementedError(f"Currently a typechar of {type_char} is not supported.")
else:
assert isinstance(data, pd.DataFrame)
cover_arr = subgroup.covers(data)
size = np.count_nonzero(cover_arr)
return cover_arr, size
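# --- Editorial example (added for illustration; not part of the original module) ---
# get_cover_array_and_size accepts several subgroup representations; the two most
# common ones are a boolean mask and an integer index array:
def _demo_get_cover_array_and_size():
    bool_mask = np.array([True, False, True, True])
    int_index = np.array([0, 2, 3])
    print(get_cover_array_and_size(bool_mask)[1])   # 3, counted from the non-zero entries
    print(get_cover_array_and_size(int_index)[1])   # 3, taken from the index array length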
def get_size(subgroup, data_len=None, data=None):
if hasattr(subgroup, "representation"):
size = subgroup.size_sg
elif isinstance(subgroup, slice):
if data_len is None:
if isinstance(data, pd.DataFrame):
data_len = len(data)
else:
raise ValueError("if you pass a slice, you need to pass either data_len or data")
# https://stackoverflow.com/questions/36188429/retrieve-length-of-slice-from-slice-object-in-python
size = len(range(*subgroup.indices(data_len)))
elif hasattr(subgroup, '__array_interface__'):
type_char = subgroup.__array_interface__['typestr'][1]
if type_char == 'b': # boolean indexing is used
size = np.count_nonzero(subgroup)
elif type_char == 'u' or type_char == 'i': # integer indexing
size = subgroup.__array_interface__['shape'][0]
else:
print(type_char)
raise NotImplementedError(f"Currently a typechar of {type_char} is not supported.")
else:
assert isinstance(data, pd.DataFrame)
size = np.count_nonzero(subgroup.covers(data))
return size
class EqualitySelector(SelectorBase):
def __init__(self, attribute_name, attribute_value, selector_name=None):
if attribute_name is None:
raise TypeError()
if attribute_value is None:
raise TypeError()
self._attribute_name = attribute_name
self._attribute_value = attribute_value
self._selector_name = selector_name
self.set_descriptions(self._attribute_name, self._attribute_value, self._selector_name)
super().__init__()
@property
def attribute_name(self):
return self._attribute_name
@property
def attribute_value(self):
return self._attribute_value
def set_descriptions(self, attribute_name, attribute_value, selector_name=None): # pylint: disable=arguments-differ
self._hash, self._query, self._string = EqualitySelector.compute_descriptions(attribute_name, attribute_value, selector_name=selector_name)
@classmethod
def compute_descriptions(cls, attribute_name, attribute_value, selector_name):
if isinstance(attribute_value, (str, bytes)):
query = str(attribute_name) + "==" + "'" + str(attribute_value) + "'"
elif np.isnan(attribute_value):
query = attribute_name + ".isnull()"
else:
query = str(attribute_name) + "==" + str(attribute_value)
if selector_name is not None:
string_ = selector_name
else:
string_ = query
hash_value = hash(query)
return (hash_value, query, string_)
def __repr__(self):
return self._query
def covers(self, data):
row = data[self.attribute_name].to_numpy()
if pd.isnull(self.attribute_value):
return pd.isnull(row)
return row == self.attribute_value
def __str__(self, open_brackets="", closing_brackets=""):
return open_brackets + self._string + closing_brackets
@property
def selectors(self):
return (self,)
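# --- Editorial example (added for illustration; not part of the original module) ---
# SelectorBase.__new__ interns instances through the weak-reference set, so
# constructing the same selector twice is expected to hand back the same object,
# and covers() yields a boolean mask over the DataFrame column:
def _demo_equality_selector():
    df = pd.DataFrame({"colour": ["red", "blue", "red"]})
    s1 = EqualitySelector("colour", "red")
    s2 = EqualitySelector("colour", "red")
    print(s1 is s2)        # True, assuming the interning in __new__ behaves as written
    print(s1.covers(df))   # boolean mask: True, False, True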
class NegatedSelector(SelectorBase):
def __init__(self, selector):
self._selector = selector
self.set_descriptions(selector)
super().__init__()
def covers(self, data_instance):
return np.logical_not(self._selector.covers(data_instance))
def __repr__(self):
return self._query
def __str__(self, open_brackets="", closing_brackets=""):
return "NOT " + self._selector.__str__(open_brackets, closing_brackets)
def set_descriptions(self, selector): # pylint: disable=arguments-differ
self._query = "(not " + repr(selector) + ")"
self._hash = hash(repr(self))
@property
def attribute_name(self):
return self._selector.attribute_name
@property
def selectors(self):
return self._selector.selectors
# Including the lower bound, excluding the upper_bound
class IntervalSelector(SelectorBase):
def __init__(self, attribute_name, lower_bound, upper_bound, selector_name=None):
self._attribute_name = attribute_name
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self.selector_name = selector_name
self.set_descriptions(attribute_name, lower_bound, upper_bound, selector_name)
super().__init__()
@property
def attribute_name(self):
return self._attribute_name
@property
def lower_bound(self):
return self._lower_bound
@property
def upper_bound(self):
return self._upper_bound
def covers(self, data_instance):
val = data_instance[self.attribute_name].to_numpy()
return np.logical_and((val >= self.lower_bound), (val < self.upper_bound))
def __repr__(self):
return self._query
def __hash__(self):
return self._hash
def __str__(self):
return self._string
@classmethod
def compute_descriptions(cls, attribute_name, lower_bound, upper_bound, selector_name=None):
if selector_name is None:
_string = cls.compute_string(attribute_name, lower_bound, upper_bound, rounding_digits=2)
else:
_string = selector_name
_query = cls.compute_string(attribute_name, lower_bound, upper_bound, rounding_digits=None)
_hash = _query.__hash__()
return (_hash, _query, _string)
def set_descriptions(self, attribute_name, lower_bound, upper_bound, selector_name=None): # pylint: disable=arguments-differ
self._hash, self._query, self._string = IntervalSelector.compute_descriptions(attribute_name, lower_bound, upper_bound, selector_name=selector_name)
@classmethod
def compute_string(cls, attribute_name, lower_bound, upper_bound, rounding_digits):
if rounding_digits is None:
formatter = "{}"
else:
formatter = "{0:." + str(rounding_digits) + "f}"
ub = upper_bound
lb = lower_bound
if ub % 1:
ub = formatter.format(ub)
if lb % 1:
lb = formatter.format(lb)
if lower_bound == float("-inf") and upper_bound == float("inf"):
repre = attribute_name + "= anything"
elif lower_bound == float("-inf"):
repre = attribute_name + "<" + str(ub)
elif upper_bound == float("inf"):
repre = attribute_name + ">=" + str(lb)
else:
repre = attribute_name + ": [" + str(lb) + ":" + str(ub) + "["
return repre
@property
def selectors(self):
return (self,)
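# --- Editorial example (added for illustration; not part of the original module) ---
# IntervalSelector includes the lower bound and excludes the upper bound, which the
# string form renders as "attribute: [lb:ub[":
def _demo_interval_selector():
    df = pd.DataFrame({"age": [15, 30, 45]})
    sel = IntervalSelector("age", 18, 45)
    print(sel)              # age: [18:45[
    print(sel.covers(df))   # boolean mask: False, True, False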
def create_selectors(data, nbins=5, intervals_only=True, ignore=None, value_restriction=None):
if ignore is None:
ignore = []
sels = create_nominal_selectors(data, ignore, value_restriction=value_restriction)
sels.extend(create_numeric_selectors(data, nbins, intervals_only, ignore=ignore, value_restriction=value_restriction))
return sels
def create_nominal_selectors(data, ignore=None, value_restriction=None):
if ignore is None:
ignore = []
nominal_selectors = []
# for attr_name in [x for x in data.select_dtypes(exclude=['number']).columns.values if x not in ignore]:
# nominal_selectors.extend(create_nominal_selectors_for_attribute(data, attr_name))
nominal_dtypes = data.select_dtypes(exclude=['number'])
dtypes = data.dtypes
# print(dtypes)
for attr_name in [x for x in nominal_dtypes.columns.values if x not in ignore]:
nominal_selectors.extend(create_nominal_selectors_for_attribute(data, attr_name, dtypes, value_restriction=value_restriction))
return nominal_selectors
def create_nominal_selectors_for_attribute(data, attribute_name, dtypes=None, value_restriction=None):
nominal_selectors = []
for val in pd.unique(data[attribute_name]):
if value_restriction is not None:
nominal_selectors.append(EqualitySelector(attribute_name, value_restriction))
else:
nominal_selectors.append(EqualitySelector(attribute_name, val))
# setting the is_bool flag for selector
if dtypes is None:
dtypes = data.dtypes
if dtypes[attribute_name] == 'bool':
for s in nominal_selectors:
s.is_bool = True
return nominal_selectors
def create_numeric_selectors(data, nbins=5, intervals_only=True, weighting_attribute=None, ignore=None, value_restriction=None):
if ignore is None:
ignore = []
numeric_selectors = []
for attr_name in [x for x in data.select_dtypes(include=['number']).columns.values if x not in ignore]:
numeric_selectors.extend(create_numeric_selectors_for_attribute(
data, attr_name, nbins, intervals_only, weighting_attribute, value_restriction=value_restriction))
return numeric_selectors
def create_numeric_selectors_for_attribute(data, attr_name, nbins=5, intervals_only=True, weighting_attribute=None, value_restriction=None):
numeric_selectors = []
data_not_null = data[data[attr_name].notnull()]
    uniqueValues = np.unique(data_not_null[attr_name])
""" Deposition of energy from low-energy photons
As detailed in section III.F.1 of the paper, low-energy photons (sub-3keV photons)
may deposit their energy into the IGM through hydrogen/helium ionization, hydrogen
excitation, and by contributing to continuum photons. By contributing to continuum
photons, we mean that a certain component of low-energy photons (sub-10.2eV photons)
have insufficient energy to effectively interact with the IGM, so they just
free-stream and can be thought of as an additional component of the CMB continuum.
"""
import sys
sys.path.append("../..")
import numpy as np
import darkhistory.physics as phys
import darkhistory.spec.spectools as spectools
import time
def get_kappa_2s(photspec):
""" Compute kappa_2s for use in kappa_DM function
Parameters
----------
photspec : Spectrum object
spectrum of photons. spec.toteng() should return Energy per baryon.
Returns
-------
kappa_2s : float
The added photoionization rate from the 1s to the 2s state due to DM photons.
"""
# Convenient Variables
eng = photspec.eng
rs = photspec.rs
Lambda = phys.width_2s1s_H
Tcmb = phys.TCMB(rs)
lya_eng = phys.lya_eng
# Photon phase space density (E >> kB*T approximation)
def Boltz(E):
return np.exp(-E/Tcmb)
bounds = spectools.get_bin_bound(eng)
mid = spectools.get_indx(bounds, lya_eng/2)
# Phase Space Density of DM
f_nu = photspec.dNdE * phys.c**3 / (
8 * np.pi * (eng/phys.hbar)**2
)
# Complementary (E - h\nu) phase space density of DM
f_nu_p = np.zeros(mid)
# Index of point complementary to eng[k]
comp_indx = spectools.get_indx(bounds, lya_eng - eng[0])
# Find the bin in which lya_eng - eng[k] resides. Store f_nu of that bin in f_nu_p.
for k in np.arange(mid):
while (lya_eng - eng[k]) < bounds[comp_indx]:
comp_indx -= 1
f_nu_p[k] = f_nu[comp_indx]
# Setting up the numerical integration
# Bin sizes
diffs = np.append(bounds[1:mid], lya_eng/2) - np.insert(bounds[1:mid], 0, 0)
diffs /= (2 * np.pi * phys.hbar)
dLam_dnu = phys.get_dLam2s_dnu()
rates = dLam_dnu(eng[:mid]/(2 * np.pi * phys.hbar))
boltz = Boltz(eng[:mid])
boltz_p = Boltz(lya_eng - eng[:mid])
# The Numerical Integral
kappa_2s = np.sum(
diffs * rates * (f_nu[:mid] + boltz) * (f_nu_p + boltz_p)
)/phys.width_2s1s_H - Boltz(lya_eng)
return kappa_2s
def kappa_DM(photspec, xe):
""" Compute kappa_DM of the modified tla.
Parameters
----------
photspec : Spectrum object
spectrum of photons. Assumed to be in dNdE mode. spec.toteng() should return Energy per baryon.
Returns
-------
kappa_DM : float
The added photoionization rate due to products of DM.
"""
eng = photspec.eng
rs = photspec.rs
rate_2p1s_times_x1s = (
8 * np.pi * phys.hubble(rs)/
(3*(phys.nH * rs**3 * (phys.c/phys.lya_freq)**3))
)
x1s_times_R_Lya = rate_2p1s_times_x1s
Lambda = phys.width_2s1s_H
# The bin number containing 10.2eV
lya_index = spectools.get_indx(eng, phys.lya_eng)
# The bins between 10.2eV and 13.6eV
exc_bounds = spectools.get_bounds_between(
eng, phys.lya_eng, phys.rydberg
)
# Effect on 2p state due to DM products
kappa_2p = (
photspec.dNdE[lya_index] * phys.nB * rs**3 *
np.pi**2 * (phys.hbar * phys.c)**3 / phys.lya_eng**2
)
# Effect on 2s state
kappa_2s = get_kappa_2s(photspec)
return (
kappa_2p*3*x1s_times_R_Lya/4 + kappa_2s*(1-xe)*Lambda/4
)/(3*x1s_times_R_Lya/4 + (1-xe)*Lambda/4)
#---- f_c functions ----#
#continuum
def getf_continuum(photspec, norm_fac, cross_check=False):
# All photons below 10.2eV get deposited into the continuum
if not cross_check:
return photspec.toteng(
bound_type='eng',
bound_arr=np.array([photspec.eng[0],phys.lya_eng])
)[0] * norm_fac
else:
return np.dot(
photspec.N[photspec.eng < 10.2],
photspec.eng[photspec.eng < 10.2]*norm_fac
)
#excitation
def getf_excitation(photspec, norm_fac, dt, xe, n, method, cross_check=False):
if((method == 'old') or (method=='helium') or (method == 'ion')):
# All photons between 11.2eV and 13.6eV are deposited into excitation
# partial binning
if not cross_check:
tot_excite_eng = (
photspec.toteng(
bound_type='eng',
bound_arr=np.array([phys.lya_eng,phys.rydberg])
)[0]
)
else:
tot_excite_eng = np.dot(
photspec.N[(photspec.eng >= 10.2) & (photspec.eng <= 13.6)],
photspec.eng[(photspec.eng >= 10.2) & (photspec.eng <= 13.6)]
)
f_excite_HI = tot_excite_eng * norm_fac
else:
# Only photons in the 10.2eV bin participate in 1s->2p excitation.
# 1s->2s transition handled more carefully.
# Convenient variables
kappa = kappa_DM(photspec, xe)
# Added this line since rate_2p1s_times_x1s function was removed.
rate_2p1s_times_x1s = (
8 * np.pi * phys.hubble(photspec.rs)/
(3*(phys.nH * photspec.rs**3 * (phys.c/phys.lya_freq)**3))
)
f_excite_HI = (
kappa * (
3*rate_2p1s_times_x1s*phys.nH + phys.width_2s1s_H*n[0]
) *
phys.lya_eng * (norm_fac / phys.nB / photspec.rs**3 * dt)
)
return f_excite_HI
#HI, HeI, HeII ionization
def getf_ion(photspec, norm_fac, n, method, cross_check=False):
# The bin number containing 10.2eV
lya_index = spectools.get_indx(photspec.eng, phys.lya_eng)
# The bin number containing 13.6eV
ryd_index = spectools.get_indx(photspec.eng, phys.rydberg)
if method == 'old':
# All photons above 13.6 eV deposit their 13.6eV into HI ionization
        #!!! The factor of 10 is probably unnecessary
if not cross_check:
tot_ion_eng = phys.rydberg * photspec.totN(
bound_type='eng',
bound_arr=np.array([phys.rydberg, 10*photspec.eng[-1]])
)[0]
else:
tot_ion_eng = phys.rydberg*np.sum(
photspec.N[photspec.eng > 13.6]
)
f_HI = tot_ion_eng * norm_fac
f_HeI = 0
f_HeII = 0
elif method == 'helium':
# Neglect HeII photoionization
# !!! Not utilizing partial binning!
rates = np.array([
n[i]*phys.photo_ion_xsec(photspec.eng, chan)
for i,chan in enumerate(['HI', 'HeI'])
])
norm_prob = np.sum(rates, axis=0)
prob = np.array([
np.divide(
rate, norm_prob,
out = np.zeros_like(photspec.eng),
where=(photspec.eng > phys.rydberg)
) for rate in rates
])
ion_eng_H = phys.rydberg * np.sum(prob[0] * photspec.N)
ion_eng_He = phys.He_ion_eng * np.sum(prob[1] * photspec.N)
f_HI = ion_eng_H * norm_fac
f_HeI = ion_eng_He * norm_fac
f_HeII = 0
else:
# HL: Not sure if this code is right.......
# Photons may also deposit their energy into HeI and HeII single ionization
# Bin boundaries of photon spectrum capable of photoionization, and number of photons in those bounds.
ion_bounds = spectools.get_bounds_between(photspec.eng, phys.rydberg)
ion_Ns = photspec.totN(bound_type='eng', bound_arr=ion_bounds)
# Probability of being absorbed within time step dt in channel a is P_a = \sigma(E)_a n_a c*dt
ionHI, ionHeI, ionHeII = [
phys.photo_ion_xsec(photspec.eng[ryd_index:],channel) * n[i]
for i,channel in enumerate(['HI','HeI','HeII'])
]
# The first energy might be less than 13.6, meaning no photo-ionization.
# The photons in this box are hopefully all between 13.6 and 24.6, so they can only ionize H
if photspec.eng[ryd_index] < phys.rydberg:
ionHI[0] = 1
# Relative likelihood of photoionization of HI is then P_HI/sum(P_a)
totList = ionHI + ionHeI + ionHeII + 1e-12
ionHI, ionHeI, ionHeII = [
llist/totList for llist in [ionHI, ionHeI, ionHeII]
]
f_HI, f_HeI, f_HeII = [
np.sum(ion_Ns * llist * norm_fac)
for llist in [
phys.rydberg*ionHI,
phys.He_ion_eng*ionHeI,
4*phys.rydberg*ionHeII
]
]
return (f_HI, f_HeI, f_HeII)
def compute_fs(photspec, x, dE_dVdt_inj, dt, method='old', cross_check=False):
""" Compute f(z) fractions for continuum photons, photoexcitation of HI, and photoionization of HI, HeI, HeII
    Given a spectrum of deposited photons, resolve its energy into continuum photons,
    HI excitation, and HI, HeI, HeII ionization, in that order.
Parameters
----------
photspec : Spectrum object
spectrum of photons. spec.toteng() should return energy per baryon.
x : list of floats
number of (HI, HeI, HeII) divided by nH at redshift photspec.rs
dE_dVdt_inj : float
energy injection rate DM, dE/dVdt |_inj
dt : float
time in seconds over which these photons were deposited.
method : {'old','ion','new'}
'old': All photons >= 13.6eV ionize hydrogen, within [10.2, 13.6)eV excite hydrogen, < 10.2eV are labelled continuum.
'ion': Same as 'old', but now photons >= 13.6 can ionize HeI and HeII also.
'new': Same as 'ion', but now [10.2, 13.6)eV photons treated more carefully.
Returns
-------
tuple of floats
Ratio of deposited energy to a given channel over energy deposited by DM.
The order of the channels is {continuum photons, HI excitation, HI ionization, HeI ion, HeII ion}
"""
chi = phys.nHe/phys.nH
xHeIII = chi - x[1] - x[2]
xHII = 1 - x[0]
xe = xHII + x[2] + 2*xHeIII
n = x * phys.nH * photspec.rs**3
# norm_fac converts from total deposited energy to f_c(z) = (dE/dVdt)dep / (dE/dVdt)inj
norm_fac = phys.nB * photspec.rs**3 / dt / dE_dVdt_inj
f_continuum = getf_continuum(photspec, norm_fac, cross_check)
f_excite_HI = getf_excitation(photspec, norm_fac, dt, xe, n, method, cross_check)
f_HI, f_HeI, f_HeII = getf_ion(photspec, norm_fac, n, method, cross_check)
    return np.array([f_continuum, f_excite_HI, f_HI, f_HeI, f_HeII])
# Copyright (c) 2021 <NAME>. Licence included in root of package.
from src.utils import get_base_path
import os
import pandas as pd
import numpy as np
def get_stock_symbols():
destination_folder = os.path.abspath(
os.path.join(get_base_path(), 'src/data/stock_prices'))
file = os.path.join(destination_folder, 'symbols' + '.csv')
data = pd.read_csv(file)
    data = np.array(data)
    return data
#------------------------------------------------------------------------------
# ABOUT NLMpy
#------------------------------------------------------------------------------
# NLMpy is a Python package for the creation of neutral landscape models that
# are widely used in the modelling of ecological patterns and processes across
# landscapes.
# A full description of NLMpy is published in: Etherington TR, Holland EP, and
# O'Sullivan D (2015) NLMpy: a Python software package for the creation of
# neutral landscape models within a general numerical framework. Methods in
# Ecology and Evolution 6(2):164-168 , which is freely available online
# (http://bit.ly/14i4x7n).
# The journal website also holds example scripts and GIS data
# (http://bit.ly/1XUXjOF) that generate the figures in the paper. There are
# also some tutorial videos that provide some advice about installing
# (http://bit.ly/1qLfMjt) and using (http://bit.ly/2491u9n) NLMpy.
#------------------------------------------------------------------------------
# LICENSING
#------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2014 <NAME>, <NAME>, and
# <NAME>.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#------------------------------------------------------------------------------
import math
import numpy as np
from scipy import ndimage
#------------------------------------------------------------------------------
# REQUIRED FUNCTIONS:
#------------------------------------------------------------------------------
def linearRescale01(array):
"""
A rescale in which the values in the array are linearly rescaled to range
between 0 and 1.
Parameters
----------
array : array
2D array of data values.
Returns
-------
out : array
2D array with rescaled values.
"""
rescaledArray = (array - np.nanmin(array)) / np.nanmax(array - np.nanmin(array))
return(rescaledArray)
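# Illustrative worked example (added note, not part of the original NLMpy code):
# for np.array([[2.0, 4.0], [6.0, 8.0]]) the minimum 2 maps to 0, the maximum 8
# maps to 1, and the remaining values scale linearly, giving approximately
# [[0.0, 0.333], [0.667, 1.0]].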
#------------------------------------------------------------------------------
# A function to insert nan cells into an array based on a binary mask array.
def maskArray(array, maskArray):
"""
Return the array with nan values inserted where present in the mask array.
It is assumed that both the arrays have the same dimensions.
Parameters
----------
array : array
2D array of data values.
maskArray : array
2D array used as a binary mask.
Returns
-------
out : array
2D array with masked values.
"""
np.place(array, maskArray==0, np.nan)
return(array)
#------------------------------------------------------------------------------
def randomUniform01(nRow, nCol, mask=None):
"""
Create an array with random values ranging 0-1.
Parameters
----------
nRow : int
The number of rows in the array.
nCol : int
The number of columns in the array.
mask : array, optional
2D array used as a binary mask to limit the elements with values.
Returns
-------
out : array
2D float array.
"""
if mask is None:
mask = np.ones((nRow, nCol))
array = np.random.random((nRow, nCol))
maskedArray = maskArray(array, mask)
rescaledArray = linearRescale01(maskedArray)
return(rescaledArray)
#------------------------------------------------------------------------------
def nnInterpolate(array, missing):
"""
Two-dimensional array nearest-neighbour interpolation in which the elements
in the positions indicated by the array "missing" are replaced by the
nearest value from the "array" of data values.
Parameters
----------
array : array
2D array of data values.
missing: boolean array
Values of True receive interpolated values.
Returns
-------
out : array
2D array with interpolated values.
"""
# Get row column based index of nearest value
rcIndex = ndimage.distance_transform_edt(missing, return_distances=False,
return_indices=True)
# Create a complete array by extracting values based on the index
interpolatedArray = array[tuple(rcIndex)]
return(interpolatedArray)
#------------------------------------------------------------------------------
def w2cp(weights):
"""
Convert a list of category weights into a 1D NumPy array of cumulative
proportions.
Parameters
----------
weights : list
A list of numeric values
Returns
-------
out : array
1D array of class cumulative proportions.
"""
w = np.array(weights, dtype=float)
proportions = w / np.sum(w)
cumulativeProportions = np.cumsum(proportions)
cumulativeProportions[-1] = 1 # to ensure the last value is 1
return(cumulativeProportions)
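# Illustrative worked example (added note, not part of the original NLMpy code):
# w2cp([1, 1, 2]) gives proportions [0.25, 0.25, 0.5] and therefore cumulative
# proportions array([0.25, 0.5, 1.0]).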
#------------------------------------------------------------------------------
def calcBoundaries(array, cumulativeProportions, classifyMask=None):
"""
Determine upper class boundaries for classification of an array with values
ranging 0-1 based upon an array of cumulative proportions.
Parameters
----------
array : array
2D array of data values.
cumulativeProportions : array
1D array of class cumulative proportions.
classifyMask : array, optional
2D array used as a binary mask to limit the elements used to determine
the upper boundary values for each class.
Returns
-------
out : array
1D float array.
"""
if classifyMask is None:
classifyMask = np.ones(np.shape(array))
maskedArray = array * classifyMask
np.place(maskedArray, classifyMask==0, np.nan)
# Determine the number of cells that are in the classification mask.
nCells = np.count_nonzero(np.isfinite(maskedArray))
# Based on the number of cells, find the index of upper boundary element
boundaryIndexes = (cumulativeProportions * nCells).astype(int) - 1
    # Index out the upper boundary value for each class
boundaryValues = np.sort(np.ndarray.flatten(maskedArray))[boundaryIndexes]
# Ensure the maximum boundary value is equal to 1
boundaryValues[-1] = 1
return(boundaryValues)
#------------------------------------------------------------------------------
def classifyArray(array, weights, classifyMask=None):
"""
Classify an array with values ranging 0-1 into proportions based upon a
list of class weights.
Parameters
----------
array : array
2D array of data values.
weights : list
A list of numeric values
classifyMask : array, optional
2D array used as a binary mask to limit the elements used to determine
the upper boundary values for each class.
Returns
-------
out : array
2D array.
"""
cumulativeProportions = w2cp(weights)
boundaryValues = calcBoundaries(array, cumulativeProportions, classifyMask)
# Classify the array
classifiedArray = np.searchsorted(boundaryValues, array)
# Replace any nan values
classifiedArray = classifiedArray.astype(float)
np.place(classifiedArray, np.isnan(array), np.nan)
return(classifiedArray)
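# Illustrative usage sketch (added note; the array size and weights are arbitrary
# example values, not from the original NLMpy code):
#   nlm = randomUniform01(50, 50)
#   classes = classifyArray(nlm, [5, 3, 2])
# produces integer class labels 0, 1 and 2 covering roughly 50%, 30% and 20%
# of the cells respectively.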
#------------------------------------------------------------------------------
def blendArray(primaryArray, arrays, scalingFactors=None):
"""
Blend a primary array with other arrays weighted by scaling factors.
Parameters
----------
primaryArray : array
2D array of data values.
arrays : list
List of 2D arrays of data values.
scalingFactors : list
List of scaling factors used to weight the arrays in the blend.
Returns
-------
out : array
2D array.
"""
if scalingFactors is None:
scalingFactors = np.ones(len(arrays))
for n in range(len(arrays)):
primaryArray = primaryArray + (arrays[n] * scalingFactors[n])
blendedArray = primaryArray / len(arrays)
rescaledArray = linearRescale01(blendedArray)
return(rescaledArray)
#------------------------------------------------------------------------------
def blendClusterArray(primaryArray, arrays, scalingFactors=None):
"""
Blend a primary cluster NLM with other arrays in which the mean value per
cluster is weighted by scaling factors.
Parameters
----------
primaryArray : array
2D array of data values in which values are clustered.
arrays : list
List of 2D arrays of data values.
scalingFactors : list
List of scaling factors used to weight the arrays in the blend.
Returns
-------
out : array
2D array.
"""
if scalingFactors is None:
scalingFactors = np.ones(len(arrays))
for n in range(len(arrays)):
meanOfClusterArray = meanOfCluster(primaryArray, arrays[n])
primaryArray = primaryArray + (meanOfClusterArray * scalingFactors[n])
blendedArray = primaryArray / len(arrays)
rescaledArray = linearRescale01(blendedArray)
return(rescaledArray)
#------------------------------------------------------------------------------
def meanOfCluster(clusterArray, array):
"""
For each cluster of elements in an array, calculate the mean value for the
cluster based on a second array.
Parameters
----------
    clusterArray : array
2D array of data values in which values are clustered.
array : array
2D array of data values.
Returns
-------
out : array
2D array.
"""
meanClusterValues = np.zeros(np.shape(clusterArray))
clusterValues = np.unique(clusterArray)
for value in clusterValues:
if np.isfinite(value):
# Extract location of values
valueLocs = clusterArray == value
# Define clusters in array
clusters, nClusters = ndimage.measurements.label(valueLocs)
# Get mean for each cluster
means = ndimage.mean(array, clusters, range(1,nClusters + 1))
means = np.insert(means, 0, 0) # for background non-cluster
# Apply mean values to clusters by index
clusterMeans = means[clusters]
# Add values for those clusters to array
meanClusterValues = meanClusterValues + clusterMeans
np.place(meanClusterValues, np.isnan(clusterArray), np.nan)
rescaledArray = linearRescale01(meanClusterValues)
return(rescaledArray)
#------------------------------------------------------------------------------
def exportASCIIGrid(outFile, nlm, xll=0, yll=0, cellSize=1):
"""
Export a NLM array as a ASCII grid raster file.
Parameters
----------
outFile : string
The path and name of the output raster file.
nlm : 2D array
The NLM to be exported.
xll : number
Raster lower left corner x coordinate.
yll : number
Raster lower left corner y coordinate.
cellSize : number
The size of the cells in the output raster.
"""
# Get dimensions of the NLM
nRow, nCol = nlm.shape
# Convert any nan elements to null data value of -9999
np.place(nlm, np.isnan(nlm), -9999)
# Create raster out file
textOut = open(outFile, 'w')
# Write metadata
textOut.write("NCOLS " + str(nCol) + "\n")
textOut.write("NROWS " + str(nRow) + "\n")
textOut.write("XLLCORNER " + str(xll) + "\n")
textOut.write("YLLCORNER " + str(yll) + "\n")
textOut.write("CELLSIZE " + str(cellSize) + "\n")
textOut.write("NODATA_VALUE -9999\n")
# Write NLM
for row in range(nRow):
lineout = ""
for col in range(nCol):
lineout = lineout + str(nlm[row,col]) + " "
textOut.write(lineout[:-1] + "\n")
textOut.close()
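# Illustrative usage sketch (added note; the file name is an arbitrary example,
# not from the original NLMpy code):
#   exportASCIIGrid("random_nlm.asc", randomUniform01(20, 20))
# writes a 20 x 20 ASCII grid raster with nan cells stored as -9999.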
#------------------------------------------------------------------------------
# NEUTRAL LANDSCAPE MODELS:
#------------------------------------------------------------------------------
def random(nRow, nCol, mask=None):
"""
Create a spatially random neutral landscape model with values ranging 0-1.
Parameters
----------
nRow : int
The number of rows in the array.
nCol : int
The number of columns in the array.
mask : array, optional
2D array used as a binary mask to limit the elements with values.
Returns
-------
out : array
2D array.
"""
array = randomUniform01(nRow, nCol, mask)
return(array)
#------------------------------------------------------------------------------
def planarGradient(nRow, nCol, direction=None, mask=None):
"""
Create a planar gradient neutral landscape model with values ranging 0-1.
Parameters
----------
nRow : int
The number of rows in the array.
nCol : int
The number of columns in the array.
direction: int, optional
The direction of the gradient as a bearing from north, if unspecified
the direction is randomly determined.
mask : array, optional
2D array used as a binary mask to limit the elements with values.
Returns
-------
out : array
2D array.
"""
if direction is None:
direction = np.random.uniform(0, 360, 1) # a random direction
if mask is None:
mask = np.ones((nRow, nCol))
# Create arrays of row and column index
rowIndex, colIndex = np.indices((nRow, nCol))
# Determine the eastness and southness of the direction
eastness = np.sin(np.deg2rad(direction))
    southness = np.cos(np.deg2rad(direction))
import os
import glob
import cv2
import argparse
import numpy as np
# SBD by reference
def calc_dic(n_objects_gt, n_objects_pred):
return np.abs(n_objects_gt - n_objects_pred)
def calc_dice(gt_seg, pred_seg):
nom = 2 * np.sum(gt_seg * pred_seg)
denom = np.sum(gt_seg) + np.sum(pred_seg)
dice = float(nom) / float(denom)
return dice
def calc_bd(ins_seg_gt, ins_seg_pred):
gt_object_idxes = list(set(np.unique(ins_seg_gt)).difference([0]))
pred_object_idxes = list(set(np.unique(ins_seg_pred)).difference([0]))
best_dices = []
for gt_idx in gt_object_idxes:
_gt_seg = (ins_seg_gt == gt_idx).astype('bool')
dices = []
for pred_idx in pred_object_idxes:
_pred_seg = (ins_seg_pred == pred_idx).astype('bool')
dice = calc_dice(_gt_seg, _pred_seg)
dices.append(dice)
best_dice = np.max(dices)
best_dices.append(best_dice)
best_dice = np.mean(best_dices)
return best_dice
def calc_sbd(ins_seg_gt, ins_seg_pred):
_dice1 = calc_bd(ins_seg_gt, ins_seg_pred)
_dice2 = calc_bd(ins_seg_pred, ins_seg_gt)
return min(_dice1, _dice2)
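# Illustrative worked example (added note, not part of the original script):
# for a ground-truth label image [[1, 1], [0, 0]] and a prediction
# [[1, 0], [0, 0]], the single object pair overlaps in one pixel, so
# Dice = 2*1/(2+1) ~ 0.667 in both directions and the symmetric best Dice
# (SBD) is therefore ~ 0.667.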
# SBD, own implementation
def BD(img_a, img_b):
ra, ca = img_a.shape
rb, cb = img_b.shape
a_list, b_list = {}, {}
for r in range(ra):
for c in range(ca):
if img_a[r][c] not in a_list:
a_list[img_a[r][c]] = [(r, c)]
else:
temp = a_list[img_a[r][c]]
temp.append((r, c))
a_list[img_a[r][c]] = temp
for r in range(rb):
for c in range(cb):
if img_b[r][c] not in b_list:
                b_list[img_b[r][c]] = [(r, c)]
else:
temp = b_list[img_b[r][c]]
temp.append((r, c))
b_list[img_b[r][c]] = temp
M = len(a_list)
N = len(b_list)
max_list = list()
for i in a_list:
result = list()
La = a_list[i]
size_a = len(La)
for j in b_list:
Lb = b_list[j]
size_b = len(Lb)
cover = 0
# check if cover
for coordinate in Lb:
if coordinate in La:
cover+=1
result.append(2*cover/(size_a+size_b))
max_list.append(max(result))
bd = sum(max_list)
bd = bd/M
return bd
def SBD(img_a, img_b):
return min(BD(img_a, img_b), BD(img_b, img_a))
if __name__ == '__main__':
parse = argparse.ArgumentParser()
parse.add_argument('-d', '--dataset', type=str, required=True, help='Input predict dataset')
parse.add_argument('-l', '--label_dataset', type=str, required=False, help='Label dataset path')
args = parse.parse_args()
file_path = glob.glob(os.path.join(args.dataset, '*_ws.png'))
sbd_record = list()
for file_name in file_path:
image_name = file_name.replace('_ws.png', '')
predict = cv2.imread(image_name+'_ws.png', 0)
img_true = cv2.imread(image_name+'_label.png', 0)
score = calc_sbd(img_true, predict)
sbd_record.append(score)
print(file_name, '%.3f'%score)
    print('overall mean sbd:', np.mean(sbd_record))
import argparse
import cv2
import numpy as np
import matplotlib.pyplot as plt
from constants import MAX_WIDTH, MAX_HEIGHT
# Transform Parameters
height, width = 1200, 800
# Original and transformed keypoints
pts1 = np.float32(
[[103, 93],
[222, 95],
[6, 130],
[310, 128]])
pts2 = np.float32(
[[0, 0],
[width, 0],
[0, height],
[width, height]])
# Translation Matrix
tx, ty = 0, 0
translation_matrix = np.float32([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
new_height, new_width = int(height * 1) + ty + 600, int(width * 1.2) + tx
# calculate the perspective transform matrix
transform_matrix = cv2.getPerspectiveTransform(pts1, pts2)
complete_transform = np.dot(translation_matrix, transform_matrix)
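# Added note: the combined matrix applies the perspective homography first and
# the pixel translation second; a point (x, y) maps through
# [u, v, w] = complete_transform @ [x, y, 1] and ends up at (u / w, v / w).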
def imshow(im, name=""): # pragma: no cover
plt.figure(name)
# BGR to RGB
plt.imshow(im[:, :, ::-1])
plt.grid(True)
def showTransform(image): # pragma: no cover
im = image.copy()
for (cx, cy) in pts1:
cv2.circle(im, (int(cx), int(cy)), 8, (0, 255, 0), -1)
imshow(im, name="transform")
def transformPoints(x, y):
points = []
for i in range(len(x)):
        point = transformPoint(np.array([x[i], y[i], 1]))
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Function, get_new_symbol
from coremltools.converters.mil.mil.var import Var
from .. import ops
from ..converter import TorchConverter, TranscriptionContext
from ..internal_graph import InternalTorchIRNode
class TestTorchOps:
"""Class containing tests for converting TorchIR -> CoreML ops.
These tests interface with only the InternalTorchIRGraph and do not
    build a torch module. Thus, they are much faster than the numerical tests.
    However, for some ops it is necessary to use the torch module to verify
    numerical output, so those tests are placed with the numerical tests.
    NOTE: Confused where @context is coming from? It's from the pytest fixture defined below.
"""
@pytest.fixture
def context(self):
return TranscriptionContext()
@pytest.fixture
def set_random_seeds(self):
torch.manual_seed(1)
np.random.seed(1)
@pytest.mark.parametrize("dtype", [torch.bool, torch.float, torch.int])
def test_constant(self, context, dtype):
test_data = torch.ones(1, dtype=dtype)
node = InternalTorchIRNode(
attr={"value": test_data}, kind="constant", inputs=[], outputs=["1"]
)
ssa = self._construct_test_graph(context, ops.constant, node, "1")
assert np.allclose(test_data, ssa.val)
assert test_data.shape == ssa.shape
def test_constant_magic(self, context):
test_val = ops.PYTORCH_MAGIC_DEFAULT
node = InternalTorchIRNode(
attr={"value": test_val}, kind="constant", inputs=[], outputs=["1"]
)
ssa = self._construct_test_graph(context, ops.constant, node, "1")
# We expect the magic default to get converted to None
assert ssa is None
@staticmethod
def _gen_constants(size, vals):
"""Helper function. Generates a list of internal constant nodes.
Arguments:
size: number of constants to generate
vals: Either a list of values for each constant or one value used for all constants."""
is_list = isinstance(vals, list)
if is_list:
if len(vals) != size:
raise ValueError("len(@vals): {} != size: {}".format(len(vals), size))
constants = []
for index in range(size):
if is_list:
val = vals[index]
else:
val = vals
constants.append(
InternalTorchIRNode(
attr={"value": val},
kind="constant",
inputs=[],
outputs=[str(index)],
)
)
input_list = [str(i) for i in range(size)]
output_name = str(len(input_list))
return constants, input_list, output_name
@staticmethod
def _construct_test_graph(
context, test_op, test_node, output_name=None, graph_inputs=None, constants=None
):
""" Construct an Function for the given @graph_inputs, @constants,
and @test_node. Returns the output of the graph, which is the ssa
Var of the given @output_name.
"""
if graph_inputs is None:
graph_inputs = {}
if constants is None:
constants = []
with Function(inputs=graph_inputs) as ssa_func:
for name in ssa_func.inputs.keys():
context.add(ssa_func.inputs[name])
for node in constants:
ops.constant(context, node)
test_op(context, test_node)
ssa = None
if output_name:
ssa = context[output_name]
return ssa
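    # Added note (not part of the original tests): a typical op test below
    # builds constant nodes for its inputs with _gen_constants, wraps the op
    # under test in an InternalTorchIRNode, evaluates it through
    # _construct_test_graph, and compares the resulting ssa.val against the
    # expected NumPy result.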
def _test_elementwise_binary(
self, context, op_name, op, test_input, num_constants, expected_result
):
"""Helper function, runs op on test input and compares against expected result"""
constants, input_list, output_name = self._gen_constants(
num_constants, test_input
)
eb_node = InternalTorchIRNode(
kind=op_name, inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, op, eb_node, output_name, constants=constants
)
np.testing.assert_allclose(expected_result, ssa.val, atol=1e-7)
def _test_cast(self, context, test_val, op_kind, op_func, python_type):
constants, input_list, output_name = self._gen_constants(1, [test_val])
node = InternalTorchIRNode(
kind=op_kind, inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, op_func, node, output_name, constants=constants
)
assert ssa.val == python_type(test_val)
def _test_activation(
self, context, input_shape, constants_list, op_kind, op_func, torch_func, atol
):
test_input = torch.rand(input_shape)
constants, input_list, output_name = self._gen_constants(
len(constants_list) + 1, [test_input] + constants_list
)
node = InternalTorchIRNode(
kind=op_kind, inputs=input_list, outputs=[output_name]
)
ssa = self._construct_test_graph(
context, op_func, node, output_name, constants=constants
)
expected_result = torch_func(test_input).numpy()
np.testing.assert_allclose(expected_result, ssa.val, atol=atol)
def test_add(self, context):
test_input_1 = np.random.rand(2, 3)
test_input_2 = np.random.rand(2, 3)
scale_factor = 1
self._test_elementwise_binary(
context,
"Add",
ops.add,
[test_input_1, test_input_2, scale_factor],
3,
test_input_1 + test_input_2,
)
def test_add_no_scale_factor(self, context):
test_input_1 = np.random.rand(2, 3)
test_input_2 = np.random.rand(2, 3)
self._test_elementwise_binary(
context,
"Add",
ops.add,
[test_input_1, test_input_2],
2,
test_input_1 + test_input_2,
)
@pytest.mark.parametrize(
"test_input_1, test_input_2",
[(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5),],
)
def test_sub(self, context, test_input_1, test_input_2):
scale_factor = 1
self._test_elementwise_binary(
context,
"Sub",
ops.sub,
[test_input_1, test_input_2, scale_factor],
3,
test_input_1 - test_input_2,
)
@pytest.mark.parametrize(
"test_input_1, test_input_2",
[(np.random.rand(3, 2), np.random.rand(3, 2)), (np.random.rand(3, 2), 5),],
)
def test_rsub(self, context, test_input_1, test_input_2):
scale_factor = 1
self._test_elementwise_binary(
context,
"rsub",
ops.sub,
[test_input_1, test_input_2, scale_factor],
3,
# Note the reversal of arg ordering relative to 'sub'
test_input_2 - test_input_1,
)
def test_mul(self, context):
test_input_1 = np.random.rand(3, 2)
        test_input_2 = np.random.rand(3, 2)
'''
Basic classes for sections and surfaces, and fundamental functions
'''
import copy
import os
import re
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import CubicSpline
from scipy import spatial
from scipy.interpolate import interp1d
from scipy.spatial.distance import cdist
class BasicSection():
'''
Section: 3D curve and 2D unit curve
'''
def __init__(self, thick=None, chord=1.0, twist=0.0):
self.xLE = 0.0
self.yLE = 0.0
self.zLE = 0.0
self.chord = chord
self.twist = twist
self.thick = 0.0
self.thick_set = thick
#* 2D unit curve
self.xx = None
self.yy = None # open curve
self.yu = None # upper surface of closed curve
self.yl = None # lower surface of closed curve
#* 3D section
self.x = np.zeros(1)
self.y = np.zeros(1)
self.z = np.zeros(1)
def set_params(self, init=False, **kwargs):
'''
Set parameters of the section
### Inputs:
```text
init: True, set to default values
```
### kwargs:
```text
xLE, yLE, zLE, chord, twist, thick (None)
```
'''
if init:
self.xLE = 0.0
self.yLE = 0.0
self.zLE = 0.0
self.chord = 1.0
self.twist = 0.0
self.thick = 0.0
self.thick_set = None
return
if 'xLE' in kwargs.keys():
self.xLE = kwargs['xLE']
if 'yLE' in kwargs.keys():
self.yLE = kwargs['yLE']
if 'zLE' in kwargs.keys():
self.zLE = kwargs['zLE']
if 'chord' in kwargs.keys():
self.chord = kwargs['chord']
if 'twist' in kwargs.keys():
self.twist = kwargs['twist']
if 'thick' in kwargs.keys():
self.thick_set = kwargs['thick']
def section(self, nn=1001, flip_x=False, proj=True):
'''
### Functions:
```text
1. Construct 2D unit curve (null in the BasicSection)
2. Transform to 3D curve
```
### Inputs:
```text
nn: total amount of points (it's here for function BasicSurface.geo_secs)
flip_x: True ~ flip section.xx in reverse order
proj: True => for unit airfoil, the rotation keeps the projection length the same
```
'''
if not isinstance(self.xx, np.ndarray):
raise Exception('The 2D curve has not been constructed')
#* Flip xx
if flip_x:
            self.xx = np.flip(self.xx)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The matching files can then be downloaded and combined into a dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the matching NetCDF files in the THREDDS catalog
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
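# Illustrative usage sketch (added note; the reference designator, dates and
# regex tag are example values, not part of the original module):
#   data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                   '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, r'.*METBK.*\.nc$')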
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
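    # convert the time record (seconds since 1900-01-01) to days, then to pandas datetimes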
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
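# Added note (not part of the original module): indexing structtype one past
# its current length appends a fresh var(), so e.g. var_list[0].name = 'time'
# on an empty structtype creates the first entry on first access.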
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
    #ZPLSC - Bio-acoustic Sonar
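    # Only the time coordinate is registered for the zplsc_c_instrument streams below;
    # no acoustic variables are mapped in this dictionary.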
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    #WAVSS - Surface Wave Spectra
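    # Bulk wave statistics from the buoy-mounted wave sensor: heights in m, periods in sec,
    # directions in degrees.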
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
    #VELPT - Single Point Velocity Meter
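    # Point velocities in m/s; heading/pitch/roll in deci-degrees; temperature and pressure
    # are kept in raw instrument units (0.01 degC and 0.001 dbar, respectively).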
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
    #PCO2W - pCO2 Water
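    # Seawater pCO2 (uatm) with the sensor thermistor temperature (degC).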
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
    #PHSEN - Seawater pH
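    # Seawater pH (unitless) with the sensor thermistor temperature (degC).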
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
    #SPKIR - Spectral Irradiance
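    # Downwelling spectral irradiance vector (uW cm-2 nm-1).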
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
    #PRESF - Seafloor Pressure
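    # Seafloor tide measurements: absolute seafloor pressure (dbar) and seawater temperature (degC).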
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
    #CTDBP - CTD (Pumped)
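    # Moored pumped CTD: temperature (degC), practical salinity, density (kg/m3),
    # pressure (dbar) and conductivity (S/m).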
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
    #VEL3D - 3-D Single Point Velocity Meter
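    # Seafloor turbulent point velocities (m/s); pressure is kept in raw instrument units (0.001 dbar).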
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
    #VEL3DK - 3-D Single Point Velocity Meter (Series K, profiler)
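    # Profiler-mounted VEL3D-K: point velocities (m/s), heading/pitch/roll (labelled 'ddegrees')
    # and the co-located CTD pressure (dbar).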
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
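    #CTDPF - CTD on the wire-following profiler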
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
    #PCO2A - pCO2 Air-Sea
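    # Surface pCO2 in seawater and in air (uatm), plus the derived air-sea CO2 flux (mol m-2 s-1).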
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
    #PARAD - Photosynthetically Available Radiation
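    # Downwelling PAR (umol photons m-2 s-1) from the profiler, with the co-located CTD pressure (dbar).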
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
    #OPTAA - Absorption Spectrophotometer
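    # Only the time coordinate is registered for the optaa_dj_dcl_instrument streams below.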
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - Nitrate (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK - 3-Axis Motion Package
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK - Bulk Meteorology Instrument Package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
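# Note: the METBK branches above all register the same 19 bulk-meteorology
# variables (sea surface temperature, conductivity and salinity, corrected wind
# components, barometric pressure, air temperature, humidity, radiative and
# turbulent flux estimates, surface currents, and specific humidity) with
# identical unit strings.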
#FLORT - 3-Wavelength Fluorometer (chlorophyll-a, CDOM, backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP - Direct Covariance Flux Package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA - Dissolved Oxygen (optode)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP - Acoustic Doppler Current Profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS - Surface Wave Spectra
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT - Single-Point Velocity Meter
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
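# PCO2W: seawater partial pressure of CO2 (pco2_seawater, uatm) plus the sensor thermistor temperature (degC).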
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
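# PHSEN: seawater pH (phsen_abcdef_ph_seawater, unitless) plus the sensor thermistor temperature (degC).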
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
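# SPKIR: downwelling spectral irradiance vector (uW cm-2 nm-1).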
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
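# PRESF: seafloor pressure (tide) records, absolute pressure (dbar) and seawater temperature (degC).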
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
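# CTDBP: moored pumped CTD, mapping temperature, practical salinity, density, pressure, and conductivity.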
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
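# VEL3D: 3-D single-point turbulent velocities (east/north/up, m/s) plus seawater pressure.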
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
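# PCO2A: surface-buoy air-sea CO2 system, seawater and atmospheric pCO2 (uatm) and the derived CO2 flux.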
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
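# OPTAA: optical absorption and attenuation meter; only the time coordinate is mapped here.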
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
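# NUTNR: nitrate concentration (umol/L), both raw and salinity-corrected.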
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
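#CTDBP
# CTDBP (RecoveredInst): pumped CTD streams recovered directly from the instrument ('recovered_inst' paths).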
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
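#ADCP
# ADCP (RecoveredInst): earth-referenced velocity profiles with bin depths, heading/pitch/roll, and east/north/up seawater velocities.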
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
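#ZPLSC
# ZPLSC (RecoveredInst): sonar echogram streams; only the time coordinate is mapped here.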
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
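#VELPT
# VELPT (RecoveredInst): single-point velocities (east/north/up) plus heading, pitch, roll, temperature, and pressure.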
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
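#VEL3D point-velocity (turbulence) datasets below: wire-following profiler (vel3d_k) and MFN seafloor (vel3d_cd) recovered streams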
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
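#PRESF seafloor pressure (tide) RecoveredInst datasets below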
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
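#PHSEN seawater pH RecoveredInst datasets below (NSIF and MFN nodes)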
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
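#PCO2W seawater pCO2 RecoveredInst datasets below (NSIF and MFN nodes)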
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
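#PARAD photosynthetically available radiation from the CE09OSPM wire-following profiler below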
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
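#NUTNR (SUNA) nitrate RecoveredInst datasets below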
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
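#FDCHP direct-covariance flux package RecoveredInst dataset below - time coordinate only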
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
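#FLORT fluorometer/optical backscatter recovered datasets below (buoy and wire-following profiler)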
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
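#DOSTA/DOFST dissolved oxygen recovered datasets below (wire-following profiler and CTDBP-hosted optodes)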
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
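#WAVSS_Stats wave statistics below, requested from the MFN ADCPT-M log9 recovered-instrument stream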
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
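#Cabled benthic experiment package (CE02SHBP/CE04OSBP BEP) streamed datasets below: CTD, DOSTA, PHSEN, PCO2W, ADCP, VEL3D, OPTAA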
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
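# CE01ISSP / CE06ISSP profiler PARAD branches: PAR counts plus interpolated CTD pressure.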
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
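# CE01ISSP / CE06ISSP profiler NUTNR (nitrate) branches, recovered CSPP only.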
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
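# CE01ISSP / CE06ISSP profiler SPKIR branches: downwelling irradiance vector plus CTD pressure.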
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
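# CE01ISSP / CE06ISSP profiler VELPT branches: velocity components, attitude, temperature, pressure.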
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
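# CE01ISSP / CE06ISSP profiler OPTAA branches: only time and interpolated CTD pressure are requested here.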
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
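# CE02SHSP / CE07SHSP profiler FLORT branches (recovered CSPP):
# scattering, chlorophyll, CDOM, backscatter, plus CTD pressure.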
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
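# CE02SHSP / CE07SHSP profiler DOSTA (dissolved oxygen) branches, recovered CSPP.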
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
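# CE02SHSP / CE07SHSP profiler CTD branches, recovered CSPP.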
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
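# CE02SHSP / CE07SHSP profiler PARAD branches, recovered CSPP.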
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
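# CE02SHSP / CE07SHSP profiler NUTNR (nitrate) branches, recovered CSPP.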
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
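# CE02SHSP / CE07SHSP profiler SPKIR branches, recovered CSPP.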
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
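# CE02SHSP / CE07SHSP profiler VELPT branches, recovered CSPP.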
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
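# CE02SHSP / CE07SHSP profiler OPTAA branches: only time and interpolated CTD pressure are requested here.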
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
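# CE05MOAS glider CTD branches (CEGL386, 384, 383, 382, 381, 327, 326, 320, 319, 312, 311, 247):
# one telemetered and one recovered-host dataset per glider, with lat/lon included.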
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
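# CE05MOAS glider DOSTA (dissolved oxygen) branches: telemetered and recovered-host datasets per glider.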
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
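# FLORT branches: per-glider flort_m_sample streams (telemetered and
# recovered_host) with scattering coefficient, chlorophyll, CDOM, backscatter,
# interpolated CTD pressure, and position.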
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
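# PARAD branches: per-glider parad_m_glider streams reporting photosynthetically
# available radiation (parad_m_par, umol photons m-2 s-1) with interpolated
# CTD pressure and position.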
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
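# ADCP branches: per-glider adcp_velocity_glider recovered_host streams with
# bin depths, attitude (heading/pitch/roll), eastward/northward/upward seawater
# velocities, interpolated CTD pressure, and position.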
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
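    # METBK hourly flux streams (metbk_hourly) for the surface moorings (CE02SHSM, CE07SHSM, CE04OSSM, CE09OSSM),
    # telemetered and recovered_host methods.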
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
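    # WAVSS mean directional wave spectra (wavss_a_dcl_mean_directional) for the surface moorings.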
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
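    # WAVSS non-directional wave spectra (wavss_a_dcl_non_directional) for the surface moorings.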
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
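    # WAVSS buoy motion time series (wavss_a_dcl_motion) for the surface moorings.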
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
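    # WAVSS Fourier coefficient streams (wavss_a_dcl_fourier) for the surface moorings.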
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
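#CE04OSPS and CE04OSPD profiler data streams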
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
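#CE04OSPS 200m platform (PLATFORM200M node) data streams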
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A RecoveredHost
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
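#METBK hourly flux data streams (metbk_hourly)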
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
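# METBK2-hr: hourly flux products from the second METBK on the CP01CNSM buoy (SBD12).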
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
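# CTDBP on the near-surface instrument frame (NSIF, RID27) of each Pioneer surface mooring;
# telemetered/recovered_host streams use generic names (temp, pressure), recovered_inst uses ctdbp_seawater_* names.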
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
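# CTDBP on the seafloor multi-function node (MFN, MFD37) of each Pioneer surface mooring.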
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
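# OPTAA (optical absorption/attenuation) on the NSIF and MFN; only the time coordinate is requested here.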
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
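# VELPT single-point velocity meter on the NSIF (RID26): velocity components plus attitude,
# temperature, and pressure fields reported in scaled instrument units.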
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
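# FLORT three-wavelength fluorometer on the NSIF (RID27): chlorophyll-a, CDOM, and optical backscatter.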
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
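# SPKIR downwelling spectral irradiance on the NSIF (RID26).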
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
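# DOSTA dissolved oxygen optode on the NSIF (RID27).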
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
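# PHSEN seawater pH on the NSIF (RID26) and MFN (MFD35).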
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
    #Coastal Pioneer Wire-Following Profilers (WFP)
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
        var_list[5].data = np.array([])
        var_list[6].data = np.array([])
        var_list[0].units = 'seconds since 1900-01-01'
        var_list[1].units = 'm-1'
        var_list[2].units = 'ug/L'
        var_list[3].units = 'ppb'
        var_list[4].units = 'm-1 sr-1'
        var_list[5].units = 'm-1'
        var_list[6].units = 'dbar'
from numpy import random
import numpy
import pickle
import sys
from envs.env_maze import MazeTask
from envs.env_maze import gen_pattern as gen_single
def dump_pattern(pattern_number, cell_scale, file_name):
handle = open(file_name, "wb")
value = [gen_single(cell_scale=cell_scale, crowd_ratio=0.10) for _ in range(pattern_number)]
pickle.dump(value, handle)
handle.close()
def load_pattern(file_name):
handle = open(file_name, "rb")
value = pickle.load(handle)
handle.close()
return value
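# Usage sketch (illustrative; the file name and counts below are arbitrary):
#     dump_pattern(pattern_number=16, cell_scale=11, file_name="mazes.pkl")
#     patterns = load_pattern("mazes.pkl")   # -> list of 16 generated patterns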
def gen_patterns(n=16, cell_scale=11, file_name=None, crowd_ratio=None):
if(file_name is None):
if(crowd_ratio is not None):
return [gen_single(cell_scale=cell_scale, crowd_ratio=crowd_ratio) for _ in range(n)]
else:
return [gen_single(cell_scale=cell_scale) for _ in range(n)]
else:
patterns = load_pattern(file_name)
size = len(patterns)
if(size < n):
return patterns
else:
            idxes = numpy.arange(size, dtype="int32")
            # Reconstructed completion (the original fragment is truncated here):
            # shuffle the indices and return a random subset of n patterns.
            random.shuffle(idxes)
            return [patterns[idx] for idx in idxes[:n]]
#!/usr/bin/env python
"""File: pointpatterns.py
Module to facilitate point pattern analysis in arbitrarily shaped 2D windows.
"""
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from random import sample
import numpy
import pandas
from scipy import integrate, optimize, interpolate
from scipy.spatial import distance, Voronoi
from scipy.stats import percentileofscore
from shapely import geometry, affinity, ops, speedups
from matplotlib import pyplot, patches
try:
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence
from .utils import AlmostImmutable, sensibly_divide, project_vectors
from .memoize.memoize import memoize_method
try:
__ = basestring
except NameError:
basestring = str
if speedups.available:
speedups.enable()
_PI = numpy.pi
_2PI = 2.0 * _PI
_PI_4 = _PI / 4.0
RSAMPLES = 49
QUADLIMIT = 100
ORIGIN = geometry.Point((0.0, 0.0))
class Window(geometry.Polygon):
"""
Represent a polygon-shaped window in the Euclidean plane, and provide
methods for computing quantities related to it.
"""
def __reduce__(self):
memcache = memoize_method.cache_name
red = list(geometry.Polygon.__reduce__(self))
red[2] = {'state': red[2],
memcache: getattr(self, memcache, {})}
return tuple(red)
def __setstate__(self, state):
geometry.Polygon.__setstate__(self, state.pop('state'))
for key in state:
setattr(self, key, state[key])
def includes(self, p):
return self.contains(p) or self.boundary.contains(p)
def wrap_into(self, points):
"""
Wrap a set of points into a plane-filling window
Parameters
----------
points : array-like, shape (n, 2)
Array of `n` point coordinates.
Returns
-------
ndarray, shape (n, 2)
New array of wrapped point coordinates.
"""
# Translate origin to centroid
cen = self.centroid
points = numpy.asarray(points) - cen
# Wrap points directly into the rhomboidal window
lattice = self.lattice()
basis = lattice[:2]
pcoeffs = project_vectors(points, basis)
pcoeffs = numpy.mod(pcoeffs + 0.5, 1.0) - 0.5
points = pcoeffs.dot(basis)
# If window is hexagonal there may be some residual wrapping to do
if len(lattice) == 6:
# Do a full loop to nudge all edge cases to where they belong
vertices = self.vertices()
for i, vertex in enumerate(vertices):
for b in lattice[i:i + 2]:
bas = numpy.vstack((vertex, .5 * b))
ppoints = project_vectors(points, bas)
pv, pb = ppoints[:, 0], ppoints[:, 1]
retard = (pv + pb > 1.0) & (pv > 0.0) & (pb > 0.0)
advance = (pv + pb < -1.0) & (pv < 0.0) & (pb < 0.0)
points[retard] -= b
points[advance] += b
# Translate back
points += cen
return points
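    # Illustrative example (assumes a plane-filling window): for the unit square
    # Window([(0, 0), (1, 0), (1, 1), (0, 1)]), wrap_into maps a point such as
    # (1.2, -0.3) periodically back into the square, to roughly (0.2, 0.7).
    # Windows without a well-defined lattice() raise ValueError instead.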
def vertices(self):
"""
Find the vectors from the centroid to the vertices of the window
Returns
-------
ndarray
Array of vertex vectors
"""
vertices = numpy.asarray(self.boundary)[:-1] - self.centroid
# Sort by angle, starting with from the x axis
angles = numpy.arctan2(vertices[:, 1], vertices[:, 0])
asort = numpy.argsort(angles)
start_index = numpy.argmin(numpy.mod(angles[asort], _2PI))
asort = numpy.roll(asort, -start_index)
return vertices[asort]
@memoize_method
def lattice(self):
"""
Compute lattice vectors of a Bravais lattice having the window as unit
cell
The lattice vectors are stored as an n-by-2 array, with n the number of
window edges, such that each row contains the coordinates of a lattice
        vector crossing a window edge.
        If the window is not a simple plane-filling polygon (parallelogram or
hexagon with reflection symmetry through its center), a ValueError
is raised.
Returns
-------
ndarray
Array of lattice vectors.
"""
vertices = self.vertices()
l = vertices.shape[0]
vrotated = numpy.roll(vertices, l // 2, axis=0)
if not (l in (4, 6) and numpy.allclose(vertices, -vrotated)):
            raise ValueError("window must be a simple plane-filling polygon "
                             "(a parallelogram, or a hexagon with reflection "
"symmetry through its center) to compute lattice "
"vectors.")
lattice = vertices + numpy.roll(vertices, 1, axis=0)
# Sort by angle, starting with the one before the first vertex vector
angles = numpy.arctan2(lattice[:, 1], lattice[:, 0])
asort = numpy.argsort(angles)
start_angle = numpy.arctan2(vertices[0, 1], vertices[0, 0])
start_index = numpy.argmin(numpy.mod(start_angle - angles[asort],
_2PI))
asort = numpy.roll(asort, -start_index)
return lattice[asort]
@memoize_method
def inscribed_circle(self):
"""
Compute the center and radius of the largest circle that can be
inscribed in the polygon
        .. note:: The largest inscribed circle is found using a standard
optimization routine. There is in principle no guarantee that it
will converge to the global optimum that corresponds to the largest
inscribed circle possible.
Returns
-------
        dict
            The x and y coordinates of the inscribed circle center, and the
            radius of the inscribed circle, under the keys 'x', 'y' and 'r'.
"""
def d(p):
point = geometry.Point(p)
if self.contains(point):
return -self.boundary.distance(point)
else:
return 0.0
cent = self.centroid
x, y = optimize.minimize(d, (cent.x, cent.y)).x
r = -d((x, y))
return dict(x=x, y=y, r=r)
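    # Sanity check: for the unit square the expected result is the circle of
    # radius 0.5 centered at (0.5, 0.5), i.e. {'x': 0.5, 'y': 0.5, 'r': 0.5},
    # up to the tolerance of the optimizer.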
@memoize_method
def longest_diagonal(self):
"""
Compute the length of the longest diagonal across the polygon
Returns
-------
scalar
Length of the longest diagonal.
"""
bpoints = list(geometry.MultiPoint(self.boundary.coords[:-1]))
dmax = 0.0
while bpoints:
p1 = bpoints.pop()
for p2 in bpoints:
d = p1.distance(p2)
if d > dmax:
dmax = d
return dmax
@memoize_method
def voronoi(self):
"""
Compute the central Voronoi unit cell of the lattice defined by the
window
Returns
-------
Window
New window instance representing the lattice Voronoi unit cell,
centered at the origin (not at the centroid of this Window
instance).
"""
lattice = self.lattice()
lattice_r1 = numpy.roll(lattice, 1, axis=0)
lattice_points = numpy.vstack(((0.0, 0.0), lattice,
lattice + lattice_r1))
voronoi = Voronoi(lattice_points)
window = voronoi.vertices[voronoi.regions[voronoi.point_region[0]]]
return type(self)(window)
@memoize_method
def centered(self):
"""
Compute a translation of the window such that the centroid coincides
with the origin
Returns
-------
Window
Centered window.
"""
cent = self.centroid
return affinity.translate(self, xoff=-cent.x, yoff=-cent.y)
@memoize_method
def diagonal_cut(self):
"""
        Compute the window obtained by cutting this window in half along
a diagonal
This operation can only be performed on windows with an even number of
vertices and reflection symmetry through the centroid. This ensures
that all diagonals between opposite vertices cut the window into two
halves.
Returns
-------
Window
Diagonally cut window.
"""
boundary = numpy.asarray(self.boundary)[:-1]
vertices = boundary - self.centroid
l = vertices.shape[0]
l_2 = l // 2
vrotated = numpy.roll(vertices, l_2, axis=0)
if not (l % 2 == 0 and numpy.allclose(vertices, -vrotated)):
raise ValueError("window must have an even number of vertices and "
"reflection symmetry through its centroid to "
"compute diagonal cut.")
# We want to begin in the lower right quadrant
angles = numpy.arctan2(vertices[:, 1], vertices[:, 0])
asort = numpy.argsort(angles)
start_index = numpy.argmin(numpy.abs(angles[asort] + _PI_4))
asort = numpy.roll(asort, -start_index)
new_boundary = boundary[asort[:l_2 + 1]]
return type(self)(new_boundary)
def dilate_by_this(self, other):
"""
Dilate another polygon by this polygon
:other: polygon to dilate
:returns: dilated polygon
NB! Don't know if this algorithm works in all cases
"""
plist = []
sbpoints = geometry.MultiPoint(self.boundary)[:-1]
obpoints = geometry.MultiPoint(other.boundary)[:-1]
for p in numpy.asarray(sbpoints):
plist.append(affinity.translate(other, xoff=p[0], yoff=p[1]))
for p in numpy.asarray(obpoints):
plist.append(affinity.translate(self, xoff=p[0], yoff=p[1]))
return ops.cascaded_union(plist)
def erode_by_this(self, other):
"""
Erode another polygon by this polygon
:other: polygon to erode
:returns: eroded polygon
NB! Don't know if this algorithm is correct in all cases
"""
eroded = type(self)(other)
sbpoints = geometry.MultiPoint(self.boundary)[:-1]
for p in numpy.asarray(sbpoints):
eroded = eroded.intersection(affinity.translate(other, xoff=-p[0],
yoff=-p[1]))
return eroded
def translated_intersection(self, xoff, yoff):
"""
Compute the intersection of the window with a translated copy of itself
:xoff: distance to translate in the x direction
:yoff: distance to translate in the y direction
:returns: a Window instance corresponding to the intersection
"""
return self.intersection(affinity.translate(self, xoff=xoff,
yoff=yoff))
@memoize_method
def _set_covariance_interpolator(self):
"""
Compute a set covariance interpolator for the window
Returns
-------
        RectBivariateSpline
            Interpolator that computes the set covariance of the window.
"""
ld = self.longest_diagonal()
rssqrt = int(numpy.sqrt(RSAMPLES))
xoffs = numpy.linspace(-ld, ld, 4 * (rssqrt + 1) - 1)
yoffs = numpy.linspace(-ld, ld, 4 * (rssqrt + 1) - 1)
scarray = numpy.zeros((xoffs.size, yoffs.size))
for (i, xoff) in enumerate(xoffs):
for (j, yoff) in enumerate(yoffs):
scarray[i, j] = self.translated_intersection(xoff, yoff).area
#return interpolate.RegularGridInterpolator((xoffs, yoffs), scarray,
# bounds_error=False,
# fill_value=0.0)
return interpolate.RectBivariateSpline(xoffs, yoffs, scarray,
kx=3, ky=3)
def set_covariance(self, x, y):
"""
Compute the set covariance of the window at given displacements
This is a wrapper around self._set_covariance_interpolator, providing
a user friendly call signature.
Parameters
----------
x, y : array-like
Arrays of the same shape giving x and y values of the displacements
at which to evaluate the set covariance.
Returns
-------
ndarray
Array of the same shape as `x` and `y` containing the set
covariance at each displacement.
"""
#xi = numpy.concatenate((x[..., numpy.newaxis],
# y[..., numpy.newaxis]), axis=-1)
#return self._set_covariance_interpolator()(xi)
return self._set_covariance_interpolator()(x, y, grid=False)
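    # Worked example for checking the spline: the unit square has the closed-form
    # set covariance (1 - |x|) * (1 - |y|) for |x| <= 1 and |y| <= 1 (0 otherwise),
    # so set_covariance(0.5, 0.0) should be close to 0.5.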
@memoize_method
def _isotropised_set_covariance_interpolator(self):
"""
Compute an isotropised set covariance interpolator for the window
Returns
-------
interp1d
            Interpolator that computes the isotropised set covariance of
the window.
"""
rvals = numpy.linspace(0.0, self.longest_diagonal(),
2 * (RSAMPLES + 1) - 1)
iso_set_cov = numpy.zeros_like(rvals)
# Identify potentially problematic angles and a safe starting- and
# ending angle for the quadrature integration
xy = numpy.asarray(self.boundary)[:-1]
problem_angles = numpy.sort(numpy.arctan2(xy[:, 1], xy[:, 0]))
theta0 = 0.5 * (problem_angles[0] + problem_angles[-1] - _2PI)
for (i, rval) in enumerate(rvals):
def integrand(theta):
return self.set_covariance(rval * numpy.cos(theta),
rval * numpy.sin(theta))
iso_set_cov[i] = (integrate.quad(integrand, theta0,
_2PI + theta0,
limit=QUADLIMIT,
points=problem_angles)[0] / _2PI)
return interpolate.interp1d(rvals, iso_set_cov, kind='cubic',
bounds_error=False, fill_value=0.0)
def isotropised_set_covariance(self, r):
"""
Compute the isotropised set covariance of the window at given
displacements
This is a wrapper around self._isotropised_set_covariance_interpolator,
providing a user friendly call signature.
Parameters
----------
r : array-like
Array giving the displacements at which to evaluate the isotropised
set covariance.
Returns
-------
ndarray
Array of the same shape as `r` containing the isotropised set
covariance at each displacement.
"""
return self._isotropised_set_covariance_interpolator()(r)
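    # Sanity check (standard small-r expansion): for a convex window with area A
    # and perimeter L the isotropised set covariance behaves like A - L * r / pi
    # as r -> 0, e.g. roughly 1 - 4 * r / pi for the unit square.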
@memoize_method
def _ball_difference_area_interpolator(self):
"""
Compute a ball difference area interpolator for the window
Returns
-------
interp1d
            Interpolator that computes the ball difference area for the
window.
"""
rvals = numpy.linspace(0.0, .5 * self.longest_diagonal(), RSAMPLES)
ball_diff_area = numpy.zeros_like(rvals)
centroid = self.centroid
for (i, r) in enumerate(rvals):
disc = centroid.buffer(r)
ball_diff_area[i] = self.difference(disc).area
return interpolate.interp1d(rvals, ball_diff_area, kind='cubic',
bounds_error=False, fill_value=0.0)
def ball_difference_area(self, r):
"""
Compute the area of the set difference of the window and a ball of
a given radius centered on the window centroid
This function provides a speedup of this computation for multiple
values of the radius, by relying on an interpolator.
Parameters
----------
r : array-like
Array giving the radii of the balls to subtract from the window.
        Returns
        -------
        ndarray
Array of the same shape as `r` containing for each value in `r` the
area of the set difference of the window and b(c, r), where c is
the centroid of the window.
"""
return self._ball_difference_area_interpolator()(r)
@memoize_method
def _pvdenom_interpolator(self):
"""
Compute an interpolator for the denominator of the p-function for the
adapted intensity estimator based on area
Returns
-------
interp1d
            Interpolator that computes the p-function denominator.
"""
def integrand(t):
return _2PI * t * self.isotropised_set_covariance(t)
rvals = numpy.linspace(0.0, self.longest_diagonal(), RSAMPLES)
dvals = numpy.empty_like(rvals)
for (i, rval) in enumerate(rvals):
dvals[i] = integrate.quad(integrand, 0.0, rval,
limit=QUADLIMIT,
)[0]
return interpolate.interp1d(rvals, dvals, kind='cubic',
bounds_error=True)
def pvdenom(self, r):
"""
Compute the denominator of the p-function for the adapted intensity
estimator based on area
This is a wrapper around self._pvdenom_interpolator, providing a user
friendly call signature.
Parameters
----------
r : array-like
Array giving the distances at which to evaluate the p-function
denominator.
Returns
-------
ndarray
Array of the same shape as `r` containing the p-function
denominator at each distance.
"""
return self._pvdenom_interpolator()(r)
def p_V(self, point, r):
"""
Compute the p-function for the adapted intensity estimator based on
area
:point: a Point instance giving the location at which to evaluate the
function
        :r: array-like with radii around 'point' at which to evaluate the
p-function
:returns: the value of the area p-function
"""
r = numpy.asarray(r)
num = numpy.empty_like(r)
r_ravel = r.ravel()
num_ravel = num.ravel()
for (i, rval) in enumerate(r_ravel):
num_ravel[i] = self.intersection(point.buffer(rval)).area
return sensibly_divide(num, self.pvdenom(r))
def p_S(self, point, r):
"""
Compute the p-function for the adapted intensity estimator based on
perimeter
:point: a Point instance giving the location at which to evaluate the
function
:r: array-like with radii around 'point' at which to evaluate the
p-function
:returns: the value of the perimeter p-function
"""
r = numpy.asarray(r)
num = numpy.empty_like(r)
r_ravel = r.ravel()
num_ravel = num.ravel()
for (i, rval) in enumerate(r_ravel):
num_ravel[i] = self.intersection(
point.buffer(rval).boundary).length
denom = _2PI * r * self.isotropised_set_covariance(r)
return sensibly_divide(num, denom)
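    # The two p-functions above, restated as formulas: p_V(x, r) equals
    # area(W intersect b(x, r)) / integral_0^r 2*pi*t*gamma_W(t) dt, and p_S(x, r)
    # equals length(W intersect boundary of b(x, r)) / (2*pi*r*gamma_W(r)), with
    # gamma_W the isotropised set covariance; both integrate to 1 over W.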
def patch(self, **kwargs):
"""
Return a matplotlib.patches.Polygon instance for this window
:kwargs: passed through to the matplotlib.patches.Polygon constructor
:returns: matplotlib.patches.Polygon instance
"""
return patches.Polygon(self.boundary, **kwargs)
def plot(self, axes=None, linewidth=2.0, fill=False, **kwargs):
"""
Plot the window
The window can be added to an existing plot via the optional 'axes'
argument.
:axes: Axes instance to add the window to. If None (default), the
current Axes instance with equal aspect ratio is used if any, or
a new one created.
:linewidth: the linewidth to use for the window boundary. Defaults to
2.0.
:fill: if True, plot a filled window. If False (default), only plot the
boundary.
:kwargs: additional keyword arguments passed on to the
patches.Polygon() constructor. Note in particular the keywords
'edgecolor', 'facecolor' and 'label'.
:returns: the plotted matplotlib.patches.Polygon instance
"""
if axes is None:
axes = pyplot.gca(aspect='equal')
cent = self.centroid
diag = self.longest_diagonal()
axes.set(xlim=(cent.x - diag, cent.x + diag),
ylim=(cent.y - diag, cent.y + diag))
wpatch = self.patch(linewidth=linewidth, fill=fill, **kwargs)
wpatch = axes.add_patch(wpatch)
return wpatch
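# Illustrative usage sketch, not part of the original module: plotting a window
# on a fresh figure. The unit-square vertex list is a hypothetical example.
#
#     from matplotlib import pyplot
#     window = Window([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
#     window.plot(fill=False, edgecolor='black', label='window')
#     pyplot.legend()
#     pyplot.show()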
class PointPattern(AlmostImmutable, Sequence):
"""
Represent a planar point pattern and its associated window, and provide
methods for analyzing its statistical properties
Parameters
----------
points : sequence or MultiPoint
A sequence of coordinate tuples or any other valid MultiPoint
constructor argument, representing the points in the point pattern.
window : sequence or Polygon or Window
A sequence of coordinate tuples or any other valid Window constructor
argument, defining the set within which the point pattern takes
values. A ValueError is raised if the window does not contain all
points in `points`. The Window method `wrap_into` can be used to wrap
points into the window before initialization, if the window is a simple
plane-filling polygon (thus providing periodic boundary conditions by
which the points can be wrapped).
pluspoints : sequence or MultiPoint, optional
Like `points`, but representing a set of extra points (usually outside
the window) to use for plus sampling.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the default edge handling to apply in computations:
``stationary``
Translational edge correction used. Intensity estimated by the
adapted intensity estimator based on area.
``finite``
Translational edge correction used. Intensity estimated by the
standard intensity estimator.
``isotropic``
Rotational edge correction used. Intensity estimated by the adapted
intensity estimator based on area.
``periodic``
No edge correction used, but points are assumed to repeat
periodically according to a lattice defined by the basis vectors in
`self.window.lattice` (if defined). Intensity estimated by the
standard intensity estimator.
``plus``
No edge correction, but plus sampling is used instead. Intensity
estimated by the standard intensity estimator.
"""
_edge_config = {
'stationary': {
'pmode': 'default',
'imode': 'area',
},
'finite': {
'pmode': 'default',
'imode': 'standard', # corrected,
},
'isotropic': {
'pmode': 'default',
'imode': 'perimeter',
},
'periodic': {
'pmode': 'periodic',
'imode': 'standard', # corrected,
},
'plus': {
'pmode': 'plus',
'imode': 'standard', # corrected,
},
}
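# Illustrative usage sketch, not part of the original module: constructing a
# PointPattern in a square window. Coordinates are hypothetical; the default
# edge correction chosen here can be overridden per method call.
#
#     points = [(0.2, 0.3), (0.5, 0.7), (0.8, 0.1), (0.6, 0.4)]
#     window = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
#     pp = PointPattern(points, window, edge_correction='stationary')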
def __init__(self, points, window, pluspoints=None,
edge_correction='stationary'):
# Avoid copying the window unless needed
if not isinstance(window, Window):
window = Window(window)
self.window = window
points = geometry.MultiPoint(points)
if len(set(map(tuple, numpy.asarray(points)))) != len(points):
raise ValueError("{} instances do not support point patterns "
"with multiple exactly equal points"
.format(type(self)))
if not window.includes(points):
raise ValueError("Not all points in 'points' are included in "
"'window'.")
self._points = points
self.pluspoints = geometry.MultiPoint(pluspoints)
self.edge_correction = edge_correction
# Implement abstract methods
def __getitem__(self, index, *args, **kwargs):
return self._points.__getitem__(index, *args, **kwargs)
def __len__(self, *args, **kwargs):
return self._points.__len__(*args, **kwargs)
# Override certain possibly very slow mixins
def __iter__(self, *args, **kwargs):
return self._points.__iter__(*args, **kwargs)
def __reversed__(self, *args, **kwargs):
return self._points.__reversed__(*args, **kwargs)
def index(self, *args, **kwargs):
return self._points.index(*args, **kwargs)
def _inherit_binary_operation(self, other, op):
"""
Define the general pattern for inheriting a binary operation on the
points as a binary operation on the PointPattern
Parameters
----------
other : shapely object
The binary operation is applied to `self` and `other`. If `other`
is also a `PointPattern` instance, an exception is raised if they
are not defined in `Window` instances that compare equal. If
`other` is not a `PointPattern` instance, the binary operation is
applied to `self._points` and `other`. The result of this operation
is returned directly, unless it is a `geometry.MultiPoint` or
`geometry.Point` instance, in which case it is used to initialize
a new `PointPattern` instance in the same window as `self`. If
applying the binary operation to `self.pluspoints` and `other` also
returns a `geometry.MultiPoint` or `geometry.Point` instance, this
is used as the `pluspoints` of the new `PointPattern`.
op : string or callable
Either a string naming the attribute of `self._points` that
implements the binary operation, or a callable implementing the
binary operation on two shapely objects.
Returns
-------
PointPattern
The result of the binary operation applied to the `PointPattern`
instances.
"""
spoints = self._points
spluspoints = self.pluspoints
if (isinstance(op, basestring) and
hasattr(spoints, op) and
hasattr(spluspoints, op)):
bound_op = getattr(spoints, op)
bound_op_plus = getattr(spluspoints, op)
else:
def bound_op(ogeom):
return op(spoints, ogeom)
def bound_op_plus(opluspoints):
return op(spluspoints, opluspoints)
swindow = self.window
if isinstance(other, type(self)) or isinstance(self, type(other)):
owindow = other.window
if not (swindow == owindow):
raise ValueError("instances of {} must be defined over "
"instances of {} that compare equal for "
"binary operations to be defined"
.format(self.__class__.__name__,
swindow.__class__.__name__))
new_points = bound_op(other._points)
new_pluspoints = bound_op_plus(other.pluspoints)
return type(self)(new_points, swindow, pluspoints=new_pluspoints,
edge_correction=self.edge_correction)
# Apparently, other is not a PointPattern. Do the easiest thing.
new_geom = bound_op(other)
if isinstance(new_geom, geometry.Point):
new_geom = geometry.MultiPoint((new_geom,))
if isinstance(new_geom, geometry.MultiPoint):
new_pluspoints = None
potential_pluspoints = bound_op_plus(other)
if isinstance(potential_pluspoints, geometry.Point):
potential_pluspoints = geometry.MultiPoint((potential_pluspoints,))
if isinstance(potential_pluspoints, geometry.MultiPoint):
new_pluspoints = potential_pluspoints
return type(self)(
new_geom, swindow, pluspoints=new_pluspoints,
edge_correction=self.edge_correction)
return new_geom
def difference(self, other):
return self._inherit_binary_operation(other, 'difference')
def intersection(self, other):
return self._inherit_binary_operation(other, 'intersection')
def symmetric_difference(self, other):
return self._inherit_binary_operation(other, 'symmetric_difference')
def union(self, other):
return self._inherit_binary_operation(other, 'union')
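# Illustrative usage sketch, not part of the original module: the inherited set
# operations act on the underlying MultiPoint objects and yield new
# PointPattern instances when both operands live in windows that compare equal.
# The shared window and the coordinates are hypothetical.
#
#     window = Window([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
#     pp_a = PointPattern([(0.2, 0.2), (0.4, 0.6)], window)
#     pp_b = PointPattern([(0.4, 0.6), (0.8, 0.8)], window)
#     merged = pp_a.union(pp_b)         # pattern with the three distinct points
#     common = pp_a.intersection(pp_b)  # pattern containing only (0.4, 0.6)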
def periodic_extension(self, periodic_levels):
"""
Compute the periodic extension of this point pattern
The extension is made by assuming that periodic boundary conditions
hold across the boundaries of the window associated with the pattern.
Parameters
----------
periodic_levels : integer
The number of levels of periodic extensions to compute. A level
roughly consists of all the lattice displacements that can be
written as a sum of an equal number of lattice unit vectors.
Returns
-------
MultiPoint
MultiPoint instance containing the points comprising the periodic
extension. Note that the points from the pattern itself are not
included.
"""
lattice = self.window.lattice()
lattice_r1 = numpy.roll(lattice, 1, axis=0)
dvec_list = []
for i in range(periodic_levels + 1):
for l in range(i):
k = i - l
dvec_list.append(k * lattice + l * lattice_r1)
dvecs = numpy.vstack(dvec_list)
periodic_points = ops.cascaded_union(
[affinity.translate(self.points(), xoff=dvec[0], yoff=dvec[1])
for dvec in dvecs])
return periodic_points
def points(self, mode='default', periodic_levels=2, project_points=False):
"""
Return the points in the pattern
Parameters
----------
mode : str {'default', 'periodic', 'plus'}, optional
String to select points:
``default``
The points constituting the pattern are returned.
``periodic``
The union of the pattern and its periodic extension as defined
by `self.periodic_extension` is returned.
``plus``
The union of the pattern and the associated plus sampling
points in `self.pluspoints` is returned.
periodic_levels : integer, optional
The number of periodic levels to compute if `mode == 'periodic'`.
See `PointPattern.periodic_extension` for explanation.
project_points : bool, optional
If True, the points will be projected into the unit square by
oblique projection onto the edges of the window of the point
pattern. The periodic extension points or plus sampling points will
of course take values outside the unit square, but will be subject
to the same transformation. If the window is not rhomboidal, an
error will be raised.
Returns
-------
MultiPoint
MultiPoint instance containing the requested points.
"""
if mode == 'default':
points = self._points
elif mode == 'periodic':
points = self._points.union(
self.periodic_extension(periodic_levels))
elif mode == 'plus':
points = self._points.union(self.pluspoints)
else:
raise ValueError("unknown mode: {}".format(mode))
if project_points:
basis_vectors = self.window.lattice()
if len(basis_vectors) != 4:
raise ValueError("projection is only possible for point "
"patterns in rhomboidal windows.")
basis_vectors = basis_vectors[:2]
# Find the lower left corner (with respect to the basis vectors)
# of the window
boundary = numpy.asarray(self.window.boundary)[:-1]
boundary_coeffs = project_vectors(boundary, basis_vectors)
anchor_coeffs = min(boundary_coeffs, key=numpy.sum)
# Subtract anchor and project
parray = numpy.array(points) - anchor_coeffs.dot(basis_vectors)
point_coeffs = project_vectors(parray, basis_vectors)
points = geometry.MultiPoint(point_coeffs)
return points
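# Illustrative usage sketch, not part of the original module: the different
# point selections. Periodic mode assumes the window defines a lattice, and
# projection assumes a rhomboidal window; `pp` is a hypothetical PointPattern.
#
#     base = pp.points()                          # just the pattern itself
#     extended = pp.points(mode='periodic', periodic_levels=1)
#     plus = pp.points(mode='plus')               # pattern plus the pluspoints
#     projected = pp.points(project_points=True)  # coordinates in the unit square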
@staticmethod
def range_tree_build(points):
"""
Construct a range tree from a set of points
Parameters
----------
points : sequence
Sequence of coordinate tuples to build the range tree
from. .. note:: shapely Point instances are not supported.
Returns
-------
tuple
Root node of the range tree. The nodes are tuples in the
following format:
[median_point, left_child, right_child, associated_binary_tree].
The associated binary tree at each node points to the root node of
a binary tree with nodes in the following format:
[median_point_r, left_child, right_child]. Here, `median_point` is
a regular coordinate tuple, while `median_point_r` is a reversed
coordinate tuple.
"""
def binary_node_stuff(points, sort_index):
# Binary tree node format: [point, left, right]
mid = len(sort_index) // 2
p = points[sort_index[mid]]
si_l, si_r = sort_index[:mid], sort_index[mid:]
return [p, None, None], si_l, si_r
def build_binary_tree(points, sort_index):
root_stuff = binary_node_stuff(points, sort_index)
stack = []
if len(sort_index) > 1:
stack.append(root_stuff)
while stack:
current, si_l, si_r = stack.pop()
left_stuff = binary_node_stuff(points, si_l)
current[1] = left_stuff[0]
if len(si_l) > 1:
stack.append(left_stuff)
right_stuff = binary_node_stuff(points, si_r)
current[2] = right_stuff[0]
if len(si_r) > 1:
stack.append(right_stuff)
return root_stuff[0]
def range_node_stuff(points, xsort_index, points_r, ysort_index):
# Range tree node format: [point, left, right,
#                          associated_binary_tree]
b = build_binary_tree(points_r, ysort_index)
mid = len(xsort_index) // 2
p = points[xsort_index[mid]]
xsi_l, xsi_r = xsort_index[:mid], xsort_index[mid:]
ysi_l = [yi for yi in ysort_index if yi in xsi_l]
ysi_r = [yi for yi in ysort_index if yi in xsi_r]
return [p, None, None, b], xsi_l, xsi_r, ysi_l, ysi_r
def build_range_tree(points, xsort_index, points_r, ysort_index):
root_stuff = range_node_stuff(points, xsort_index,
points_r, ysort_index)
stack = []
if len(xsort_index) > 1:
stack.append(root_stuff)
while stack:
current, xsi_l, xsi_r, ysi_l, ysi_r = stack.pop()
left_stuff = range_node_stuff(points, xsi_l, points_r, ysi_l)
current[1] = left_stuff[0]
if len(xsi_l) > 1:
stack.append(left_stuff)
right_stuff = range_node_stuff(points, xsi_r, points_r, ysi_r)
current[2] = right_stuff[0]
if len(xsi_r) > 1:
stack.append(right_stuff)
return root_stuff[0]
indices = range(len(points))
points_r = [p[::-1] for p in points]
xsort_index = sorted(indices, key=lambda i: points[i])
ysort_index = sorted(indices, key=lambda i: points_r[i])
return build_range_tree(points, xsort_index, points_r, ysort_index)
@staticmethod
def range_tree_query(tree, xmin, xmax, ymin, ymax):
"""
Return the points stored in a range tree that lie inside a rectangular
region
Parameters
----------
tree : tuple
Root node of the range tree, as returned from
`PointPattern.range_tree_build`.
xmin, xmax, ymin, ymax : scalars
Limits of the range in which to query the range tree for points.
Limits are inclusive in both ends.
Returns
-------
list
List of coordinate tuples for all points from the tree inside the
given range.
"""
xmin, xmax = (xmin, -numpy.inf), (xmax, numpy.inf)
ymin, ymax = (ymin, -numpy.inf), (ymax, numpy.inf)
def isleaf(node):
return (node[1] is None) and (node[2] is None)
def query(root, min_, max_, report, points):
# Find split node.
split = root
while not isleaf(split):
x = split[0]
if x > max_:
split = split[1]
elif x <= min_:
split = split[2]
else:
break
else:
# The split node is a leaf node. Report if relevant and finish.
if min_ <= split[0] <= max_:
report(split, points)
return
# The split node is a non-leaf node: traverse subtrees and report
# relevant nodes.
# First, take the left subtree.
node = split[1]
while not isleaf(node):
if node[0] > min_:
# The whole right subtree is relevant. Report it.
report(node[2], points)
node = node[1]
else:
node = node[2]
# We end on a leaf node. Report if relevant.
if min_ <= node[0] <= max_:
report(node, points)
# Then take the right subtree.
node = split[2]
while not isleaf(node):
if node[0] <= max_:
# The whole left subtree is relevant. Report it.
report(node[1], points)
node = node[2]
else:
node = node[1]
# We end on a leaf node. Report if relevant.
if min_ <= node[0] <= max_:
report(node, points)
def report_subtree_r(node, points):
stack = [node]
while stack:
node = stack.pop()
if isleaf(node):
points.append(node[0][::-1])
else:
stack.extend(node[1:3])
def report_yquery(node, points):
return query(node[3], ymin, ymax, report_subtree_r, points)
points = []
query(tree, xmin, xmax, report_yquery, points)
return points
#@memoize_method
def range_tree(self, project_points=True):
"""
Construct a range tree from the points in the pattern
Only the actual points in the pattern are added to the range tree --
plus sampling points or points from the periodic extension are never
used.
Parameters
----------
project_points : bool, optional
Passed to `PointPattern.points`.
Returns
-------
Root node of the range tree. For details about the type and format, see
`PointPattern.range_tree_build`.
"""
points = self.points(project_points=project_points)
return self.range_tree_build([tuple(p)
for p in numpy.asarray(points)])
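# Illustrative usage sketch, not part of the original module: building a range
# tree from plain coordinate tuples and querying an axis-aligned rectangle.
# The coordinates are hypothetical.
#
#     pts = [(0.1, 0.2), (0.4, 0.9), (0.5, 0.5), (0.8, 0.3)]
#     tree = PointPattern.range_tree_build(pts)
#     inside = PointPattern.range_tree_query(tree, 0.0, 0.6, 0.0, 0.6)
#     # `inside` should contain (0.1, 0.2) and (0.5, 0.5), the points with
#     # both coordinates in [0.0, 0.6].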
@staticmethod
def pairwise_vectors(pp1, pp2=None):
"""
Return a matrix of vectors between points in a point pattern
:pp1: PointPattern or MultiPoint instance containing the points to find
vectors between
:pp2: if not None, vectors are calculated from points in pp1 to points
in pp2 instead of between points in pp1
:returns: numpy array where slice [i, j, :] contains the vector
pointing from pp1[i] to pp1[j], or if pp2 is not None, from
pp1[i] to pp2[j]
"""
ap1 = numpy.array(pp1)[:, :2]
if pp2 is not None:
ap2 = numpy.array(pp2)[:, :2]
else:
ap2 = ap1
return ap2 - ap1[:, numpy.newaxis, :]
@staticmethod
def pairwise_distances(pp1, pp2=None):
"""
Return a matrix of distances between points in a point pattern
:pp1: PointPattern or MultiPoint instance containing the points to find
distances between
:pp2: if not None, distances are calculated from points in pp1 to
points in pp2 instead of between points in pp1
:returns: numpy array where element [i, j] contains the distance
from pp1[i] to pp1[j], or if pp2 is not None, from pp1[i] to
pp2[j]
"""
#diff = PointPattern.pairwise_vectors(pp1, pp2=pp2)
#return numpy.sqrt(numpy.sum(diff * diff, axis=-1))
ap1 = numpy.array(pp1)[:, :2]
if pp2 is not None:
ap2 = numpy.array(pp2)[:, :2]
else:
ap2 = ap1
return distance.cdist(ap1, ap2)
def nearest(self, point, mode='default'):
"""
Return the point in the pattern closest to the location given by
'point'
:point: Point instance giving the location to find the nearest point to
:mode: string to select the points among which to look for the nearest
point. See the documentation for PointPattern.points() for
details.
:returns: Point instance representing the point in the pattern nearest
'point'
"""
return min(self.points(mode=mode).difference(point),
key=lambda p: point.distance(p))
def nearest_list(self, point, mode='default'):
"""
Return the list of points in the pattern, sorted by distance to the
location given by 'point'
The list does not include 'point' itself, even if it is part of the
pattern.
:point: Point instance giving the location to sort the points by
distance to.
:mode: string to select the points to sort. See the documentation for
PointPattern.points() for details.
:returns: list of Point instances containing the points in the pattern,
sorted by distance to 'point'.
"""
return sorted(self.points(mode=mode).difference(point),
key=lambda p: point.distance(p))
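# Illustrative usage sketch, not part of the original module: nearest-point
# queries around an arbitrary location; `pp` and `loc` are hypothetical.
#
#     from shapely import geometry
#     loc = geometry.Point(0.5, 0.5)
#     closest = pp.nearest(loc)           # single nearest pattern point
#     by_distance = pp.nearest_list(loc)  # all pattern points, nearest first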
def intensity(self, mode='standard', r=None):
"""
Compute an intensity estimate, assuming a stationary point pattern
:mode: flag to select the kind of estimator to compute. Possible
values:
'standard': The standard estimator: the number of points in the
pattern divided by the area of the window.
'area': The adapted estimator based on area.
'perimeter': The adapted estimator based on perimeter.
'minus': The standard estimator in a window eroded by the radius r.
'neighbor': The standard estimator subject to nearest neighbor edge
correction.
:r: array-like, containing distances at which to evaluate the intensity
estimator, for modes where this is relevant. For modes where
distance is not relevant, `r` may be omitted.
:returns: scalar or array-like containing the estimated intensities.
"""
window = self.window
if mode == 'standard':
intensity = len(self) / window.area
elif mode in ('area', 'perimeter'):
if mode == 'area':
pfunc = window.p_V
else:
pfunc = window.p_S
intensity = sum(pfunc(p, r) for p in self)
elif mode == 'minus':
try:
r_enum = enumerate(r)
except TypeError:
ew = window.buffer(-r)
intensity = len(self._points.intersection(ew)) / ew.area
else:
intensity = numpy.zeros_like(r)
for (i, rval) in r_enum:
ew = window.buffer(-rval)
intensity[i] = len(self._points.intersection(ew)) / ew.area
elif mode == 'neighbor':
intensity = 0.0
for p in self:
nn_dist = p.distance(self._points.difference(p))
if nn_dist <= p.distance(window.boundary):
intensity += 1.0 / window.buffer(-nn_dist).area
else:
raise ValueError("unknown mode: {}".format(mode))
return intensity
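# Illustrative usage sketch, not part of the original module: the different
# intensity estimators; the distance array is only needed for the
# distance-dependent modes. `pp` is a hypothetical PointPattern.
#
#     import numpy
#     r = numpy.linspace(0.0, 0.25, 26)
#     lam_std = pp.intensity()                         # points per unit area
#     lam_area = pp.intensity(mode='area', r=r)        # adapted, area-based
#     lam_perim = pp.intensity(mode='perimeter', r=r)  # adapted, perimeter-based
#     lam_minus = pp.intensity(mode='minus', r=r)      # minus sampling in eroded windows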
def squared_intensity(self, mode='standard', r=None):
"""
Compute an estimate of the squared intensity, assuming a stationary
point pattern
The estimate is found by squaring an estimate of the intensity, and
multiplying with (n - 1) / n, where n is the number of points in the
pattern, to remove statistical bias due to the squaring.
:mode: flag to select the kind of estimator to compute. The supported
modes are listed in the documentation for
PointPattern.intensity().
# In addition, the
# following mode is supported:
#'corrected': The square of the 'standard' intensity estimate,
# multiplied by (n - 1) / n to give an unbiased
# estimate of the squared intensity.
:r: array-like, containing distances at which to evaluate the squared
intensity estimator, for modes where this is relevant. For modes
where distance is not relevant, `r` may be omitted.
:returns: scalar or array-like containing the estimated squared
intensities.
"""
n = len(self)
#if mode == 'corrected':
# if n == 0:
# return 0.0
#
# lambda_ = self.intensity(mode='standard')
# return lambda_ * lambda_ * (n - 1) / n
#else:
# lambda_ = self.intensity(mode=mode, r=r)
# return lambda_ * lambda_
if n == 0:
return 0.0
lambda_ = self.intensity(mode=mode, r=r)
return lambda_ * lambda_ * (n - 1) / n
def rmax(self, edge_correction=None):
"""
Return the largest relevant interpoint distance for a given edge
correction in the window of this point pattern
Parameters
----------
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
scalar
Largest relevant interpoint distance.
"""
if edge_correction is None:
edge_correction = self.edge_correction
if edge_correction in ('finite', 'plus', 'isotropic'):
return self.window.longest_diagonal()
elif edge_correction == 'periodic':
return 0.5 * self.window.voronoi().longest_diagonal()
elif edge_correction == 'stationary':
return 2.0 * self.window.inscribed_circle()['r']
else:
raise ValueError("unknown edge correction: {}"
.format(edge_correction))
def rvals(self, edge_correction=None):
"""
Construct an array of r values tailored for the empirical K/L-functions
The returned array contains a pair of tightly spaced values around each
vertical step in the K/L-functions, and evenly spaced r values with
moderate resolution elsewhere.
Parameters
----------
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Array of r values tailored to the empirical K/L-functions
"""
if edge_correction is None:
edge_correction = self.edge_correction
rmax = self.rmax(edge_correction=edge_correction)
rvals = numpy.linspace(0.0, rmax, RSAMPLES)
# Get step locations
rsteps, __ = self._estimator_base(edge_correction)
micrormax = 1.e-6 * rmax
rstep_values = numpy.repeat(rsteps, 2)
rstep_values[0::2] -= micrormax
rstep_values[1::2] += micrormax
# Add r values tightly around each step
rstep_indices = numpy.searchsorted(rvals, rstep_values)
rvals = numpy.insert(rvals, rstep_indices, rstep_values)
return rvals
@staticmethod
def pair_weights(window, mp1, mp2, edge_correction):
"""
Compute the weights that pairs of points in a window contribute in the
estimation of second-order summary characteristics
Parameters
----------
window : Window
Window in which the points take values.
mp1, mp2 : MultiPoint
MultiPoint instances containing the points to pair up.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details.
Returns
-------
array
Array containing the weight of the pair `(mp1[i], mp2[j])` in
element `[i, j]`.
"""
if edge_correction in ('finite', 'stationary'):
diff = PointPattern.pairwise_vectors(mp1, mp2)
return 1.0 / window.set_covariance(diff[:, :, 0], diff[:, :, 1])
elif edge_correction == 'periodic':
m, n = len(mp1), len(mp2)
w = numpy.zeros((m, n))
if n < m:
mp1, mp2 = mp2, mp1
wview = w.transpose()
else:
wview = w
mp2_arr = numpy.array(mp2)
voronoi = window.voronoi()
area_inv = 1.0 / voronoi.area
centroid = voronoi.centroid
#distances = PointPattern.pairwise_distances(mp1, mp2)
pdisps = numpy.asarray(mp1) - numpy.asarray(centroid)
for (i, pd) in enumerate(pdisps):
translated_window = affinity.translate(voronoi,
xoff=pd[0], yoff=pd[1])
valid_mp2 = mp2.intersection(translated_window)
vmp2_arr = numpy.atleast_2d(valid_mp2)
vindex = numpy.any(distance.cdist(mp2_arr, vmp2_arr) == 0.0,
axis=-1)
#vindex = numpy.any(
# numpy.all(PointPattern.pairwise_vectors(
# mp2_arr, vmp2_arr) == 0.0, axis=-1), axis=-1)
wview[i, vindex] = area_inv
## Isotropic edge correction (to cancel corner effects that are
## still present for large r)
#for j in numpy.nonzero(vindex)[0]:
# r = distances[i, j]
# ring = centroid.buffer(r).boundary
# wview[i, j] *= (_2PI * r /
# ring.intersection(window).length)
return w
elif edge_correction == 'plus':
m, n = len(mp1), len(mp2)
w = numpy.empty((m, n))
w.fill(1.0 / window.area)
return w
elif edge_correction == 'isotropic':
m, n = len(mp1), len(mp2)
w = numpy.zeros((m, n))
distances = PointPattern.pairwise_distances(mp1, mp2)
for (i, p1) in enumerate(mp1):
for j in range(n):
r = distances[i, j]
ring = p1.buffer(r).boundary
rball = ORIGIN.buffer(r)
doughnut = window.difference(window.erode_by_this(rball))
w[i, j] = _2PI * r / (
window.intersection(ring).length * doughnut.area)
return w
else:
raise ValueError("unknown edge correction: {}"
.format(edge_correction))
@memoize_method
def _estimator_base(self, edge_correction):
"""
Compute the distances between pairs of points in the pattern, and the
weights they contribute in the estimation of second-order
characteristics
Parameters
----------
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details.
Returns
-------
r : array
Array of containing the pairwise distances in the point pattern,
sorted from small to large. Only pairs that actually contribute
with the selected edge correction are included.
weights : array
Array containing the weights associated with pairs in the point
pattern, sorted such that weights[i] gives the weight of the pair
with distance r[i].
"""
rmax = self.rmax(edge_correction=edge_correction)
pmode = self._edge_config[edge_correction]['pmode']
allpoints = self.points(mode=pmode)
distances = self.pairwise_distances(self._points, allpoints)
valid = numpy.logical_and(distances < rmax, distances != 0.0)
index1, = numpy.nonzero(numpy.any(valid, axis=1))
index2, = numpy.nonzero(numpy.any(valid, axis=0))
mp1 = geometry.MultiPoint([self[i] for i in index1])
mp2 = geometry.MultiPoint([allpoints[i] for i in index2])
weight_matrix = self.pair_weights(self.window, mp1, mp2,
edge_correction)
r = distances[valid]
sort_ind = numpy.argsort(r)
r = r[sort_ind]
weights = weight_matrix[valid[index1, :][:, index2]]
weights = weights[sort_ind]
return r, weights
def _cumulative_base(self, edge_correction):
"""
Compute the cumulative weight of the points in the pattern
Parameters
----------
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details.
Returns
-------
rsteps : ndarray
Array containing the r values between 0 and `rmax` at which
the cumulative characteristics make jumps.
cweights : ndarray
Array of the same shape as `rsteps`, containing the value of the
cumulated weights just after each step.
"""
rmax = self.rmax(edge_correction=edge_correction)
rsteps, weights = self._estimator_base(edge_correction)
rsteps = numpy.hstack((0.0, rsteps, rmax))
weights = numpy.hstack((0.0, weights, numpy.nan))
cweights = numpy.cumsum(weights)
return rsteps, cweights
def kfunction(self, r, edge_correction=None):
"""
Evaluate the empirical K-function of the point pattern
Parameters
----------
r : array-like
array of values at which to evaluate the empirical K-function.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Values of the empirical K-function evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
rsteps, cweights = self._cumulative_base(
edge_correction=edge_correction)
indices = numpy.searchsorted(rsteps, r, side='right') - 1
imode = self._edge_config[edge_correction]['imode']
lambda2 = self.squared_intensity(mode=imode, r=r)
return sensibly_divide(cweights[indices], lambda2)
def lfunction(self, r, edge_correction=None):
"""
Evaluate the empirical L-function of the point pattern
Parameters
----------
r : array-like
array of values at which to evaluate the empirical L-function.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Values of the empirical L-function evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
return numpy.sqrt(self.kfunction(r, edge_correction=edge_correction) /
_PI)
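# Illustrative usage sketch, not part of the original module: evaluating the
# empirical K- and L-functions on the tailored r grid and comparing against
# the CSR references pi * r ** 2 and r. `pp` is a hypothetical PointPattern.
#
#     r = pp.rvals()
#     k = pp.kfunction(r)
#     l = pp.lfunction(r)
#     k_offset = k - _PI * r * r  # _PI is the module-level constant used above
#     l_offset = l - r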
def pair_corr_function(self, r, bandwidth=None, edge_correction=None):
"""
Evaluate the empirical pair correlation function of the point pattern
Parameters
----------
r : array-like
array of values at which to evaluate the empirical pair correlation
function.
bandwidth : scalar
The bandwidth of the box kernel used to estimate the density of
points pairs at a given distance. If None, the bandwidth is set to
:math:`0.2 / \sqrt{\lambda}`, where :math:`\lambda` is the standard
intensity estimate for the process.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Values of the empirical pair correlation function evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
if bandwidth is None:
bandwidth = 0.2 / numpy.sqrt(self.intensity())
rpairs, weights = self._estimator_base(edge_correction)
# Find the contribution from each pair to each element in `r`
d = numpy.abs(r[numpy.newaxis, ...] - rpairs[..., numpy.newaxis])
w = numpy.sum((d < bandwidth) * weights[..., numpy.newaxis], axis=0)
w *= 1.0 / (2.0 * _2PI * r * bandwidth)
imode = self._edge_config[edge_correction]['imode']
lambda2 = self.squared_intensity(mode=imode, r=r)
return sensibly_divide(w, lambda2)
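# Illustrative usage sketch, not part of the original module: a pair
# correlation estimate with an explicit kernel bandwidth instead of the
# default 0.2 / sqrt(intensity). `pp` is a hypothetical PointPattern.
#
#     import numpy
#     r = numpy.linspace(0.01, pp.rmax(), 100)
#     g = pp.pair_corr_function(r, bandwidth=0.05)
#     # Under CSR, g should fluctuate around 1.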
def kfunction_std(self, r, edge_correction=None):
"""
Compute the theoretical standard deviation of the empirical k-function
of a point pattern like this one, under the CSR hypothesis.
The ``theoretical'' standard deviation is really an empirically
validated formula, and should be a very good fit to the true standard
deviation within the interval given by
`PointPattern.kstatistic_interval`. It is currently only implemented
for periodic boundary conditions -- an array of ones is returned for
other edge corrections.
Parameters
----------
r : array-like
array of values at which to evaluate the empirical K-function
standard deviation.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Values of the standard deviation of the empirical K-function,
evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
r = numpy.asarray(r)
if edge_correction == 'periodic':
imode = self._edge_config[edge_correction]['imode']
squared_intensity = self.squared_intensity(r=r, mode=imode)
voronoi = self.window.voronoi()
area = voronoi.area
npnp_1 = area * area * squared_intensity
kstd = r * numpy.sqrt(2.0 * _PI * voronoi.ball_difference_area(r) /
npnp_1)
# (npnp_1 + 0.5 + numpy.sqrt(npnp_1 + 0.25)))
else:
kstd = numpy.ones_like(r)
return kstd
def kfunction_std_inv(self, r, edge_correction=None):
"""
Compute the inverse of the theoretical standard deviation of the
empirical k-function of a point pattern like this one, under the CSR
hypothesis.
Parameters
----------
r, edge_correction
See `PointPattern.kfunction_std`.
Returns
-------
array
Values of the inverse of the standard deviation of the empirical
K-function, evaluated at `r`.
"""
return 1.0 / self.kfunction_std(r, edge_correction=edge_correction)
def lfunction_std(self, r, edge_correction=None):
"""
Compute the theoretical standard deviation of the empirical L-function
of a point pattern like this one, under the CSR hypothesis.
The ``theoretical'' standard deviation is really an empirically
validated formula, and should be a very good fit to the true standard
deviation within the interval given by
`PointPattern.lstatistic_interval`. It is currently only implemented
for periodic boundary conditions -- an array of ones is returned for
other edge corrections.
Parameters
----------
r : array-like
array of values at which to evaluate the empirical L-function
standard deviation.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
array
Values of the standard deviation of the empirical L-function,
evaluated at `r`.
"""
if edge_correction is None:
edge_correction = self.edge_correction
r = numpy.asarray(r)
if edge_correction == 'periodic':
lstd = (self.kfunction_std(r, edge_correction=edge_correction) /
(2.0 * _PI * r))
else:
lstd = numpy.ones_like(r)
return lstd
def lfunction_std_inv(self, r, edge_correction=None):
"""
Compute the inverse of the theoretical standard deviation of the
empirical L-function of a point pattern like this one, under the CSR
hypothesis.
Parameters
----------
r, edge_correction
See `PointPattern.lfunction_std`.
Returns
-------
array
Values of the inverse of the standard deviation of the empirical
L-function, evaluated at `r`.
"""
return 1.0 / self.lfunction_std(r, edge_correction=edge_correction)
def kstatistic(self, rmin=None, rmax=None, weight_function=None,
edge_correction=None):
"""
Compute the K test statistic for CSR
The test statistic is defined as max(abs(K(r) - pi * r ** 2)) for
r-values between some minimum and maximum radii.
Parameters
----------
rmin : scalar
The minimum r value to consider when computing the statistic. If
None, the value is set to 0.0.
rmax : scalar
The maximum r value to consider when computing the statistic. If
None, the value is set by the upper limit from
`PointPattern.lstatistic_interval`.
weight_function : callable, optional
If not None, the offset `K(r) - pi * r ** 2` is weighted by
`weight_function(r)`. The function should accept one array-like
argument of r values. A typical example of a relevant weight
function is `pp.kfunction_std_inv(r)`, where `pp` is the
`PointPattern` instance for which the K test statistic is computed.
This weight will compensate for the variation of the variance of
K(r) for different r.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
scalar
The K test statistic.
"""
if edge_correction is None:
edge_correction = self.edge_correction
interval = self.kstatistic_interval(edge_correction=edge_correction)
if rmin is None:
rmin = interval[0]
if rmax is None:
rmax = interval[1]
if rmin > rmax:
raise ValueError("'rmin' is larger than 'rmax'.")
# The largest deviation between K(r) and pi * r ** 2 is bound to be at a
# vertical
# step. We go manual instead of using self.kfunction, in order to get
# it as exactly and cheaply as possible.
rsteps, cweights = self._cumulative_base(
edge_correction=edge_correction)
left = numpy.searchsorted(rsteps, rmin, side='right')
right = numpy.searchsorted(rsteps, rmax, side='left')
# Include endpoints, and extract cweights for the in-between intervals
rsteps = numpy.hstack((rmin, rsteps[left:right], rmax))
cweights = cweights[left - 1:right]
# Compute the K-values just before and after each step
imode = self._edge_config[edge_correction]['imode']
lambda2 = numpy.ones_like(rsteps)
lambda2[:] = self.squared_intensity(mode=imode, r=rsteps)
kvals_low = sensibly_divide(cweights, lambda2[:-1])
kvals_high = sensibly_divide(cweights, lambda2[1:])
# Compute the offset
pi_rsteps_sq = _PI * rsteps * rsteps
offset = numpy.hstack((kvals_low - pi_rsteps_sq[:-1],
kvals_high - pi_rsteps_sq[1:]))
# Weight the offsets by the weight function
if weight_function is not None:
weight = weight_function(rsteps)
weight = numpy.hstack((weight[:-1], weight[1:]))
offset *= weight
return numpy.nanmax(numpy.abs(offset))
def lstatistic(self, rmin=None, rmax=None, weight_function=None,
edge_correction=None):
"""
Compute the L test statistic for CSR
The test statistic is defined as max(abs(L(r) - r)) for r-values between
some minimum and maximum radii. Note that if edge_correction ==
'finite', the power of the L test may depend heavily on the maximum
r-value and the number of points in the pattern, and the statistic
computed by this function may not be adequate.
Parameters
----------
rmin : scalar
The minimum r value to consider when computing the statistic. If
None, the value is set by `PointPattern.lstatistic_interval`.
rmax : scalar
The maximum r value to consider when computing the statistic. If
None, the value is set by `PointPattern.lstatistic_interval`.
weight_function : callable, optional
If not None, the offset `L(r) - r` is weighted by
`weight_function(r)`. The function should accept one array-like
argument of r values. A typical example of a relevant weight
function is `pp.lfunction_std_inv(r)`, where `pp` is the
`PointPattern` instance for which the L test statistic is computed.
This weight will compensate for the variation of the variance of
L(r) for different r.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
scalar
The L test statistic.
"""
if edge_correction is None:
edge_correction = self.edge_correction
interval = self.lstatistic_interval(edge_correction=edge_correction)
if rmin is None:
rmin = interval[0]
if rmax is None:
rmax = interval[1]
# The largest deviation between L(r) and r is bound to be at a vertical
# step. We go manual instead of using self.lfunction, in order to get
# it as exactly and cheaply as possible.
rsteps, cweights = self._cumulative_base(
edge_correction=edge_correction)
valid = numpy.nonzero((rsteps > rmin) & (rsteps < rmax))
# Include endpoints, and extract cweights for the in-between intervals
rsteps = numpy.hstack((rmin, rsteps[valid], rmax))
cweights = numpy.hstack((cweights[valid[0][0] - 1], cweights[valid]))
# Compute the L-values just before and after each step
imode = self._edge_config[edge_correction]['imode']
lambda2 = numpy.ones_like(rsteps)
lambda2[:] = self.squared_intensity(mode=imode, r=rsteps)
lvals_low = numpy.sqrt(sensibly_divide(cweights, _PI * lambda2[:-1]))
lvals_high = numpy.sqrt(sensibly_divide(cweights, _PI * lambda2[1:]))
# Compute the offset
offset = numpy.hstack((lvals_high - rsteps[:-1],
lvals_low - rsteps[1:]))
# Weight the offsets by the theoretical standard deviation at the
# corresponding r values.
if weight_function is not None:
weight = weight_function(rsteps)
weight = numpy.hstack((weight[:-1], weight[1:]))
offset *= weight
return numpy.nanmax(numpy.abs(offset))
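# Illustrative usage sketch, not part of the original module: the K and L test
# statistics with the variance-stabilizing weights suggested in the docstrings.
# The weights are only informative with periodic edge correction; otherwise
# they degenerate to ones. `pp` is a hypothetical PointPattern.
#
#     lstat = pp.lstatistic(weight_function=pp.lfunction_std_inv,
#                           edge_correction='periodic')
#     kstat = pp.kstatistic(weight_function=pp.kfunction_std_inv,
#                           edge_correction='periodic')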
@memoize_method
def ksstatistic(self, variation='fasano'):
"""
Compute the 2D Kolmogorov-Smirnov test statistic for CSR
Parameters
----------
variation : {'fasano', 'peacock'}
Flag to select which definition of the 2D extension of the test
statistic to use. See <NAME>., <NAME>., & <NAME>. (2007). The
two-dimensional Kolmogorov-Smirnov test. Proceedings of Science.
Retrieved from http://bura.brunel.ac.uk/handle/2438/1166.
Returns
-------
scalar
The value of the KS test statistic.
"""
if variation == 'fasano':
def piter(points):
for p in numpy.asarray(points):
yield p[0], p[1], True
elif variation == 'peacock':
def piter(points):
parray = numpy.asarray(points)
for (i, p) in enumerate(parray):
for (j, q) in enumerate(parray):
yield p[0], q[1], i == j
else:
raise ValueError("Unknown 'variation': {}".format(variation))
tree = self.range_tree(project_points=True)
points = self.points(project_points=True)
n = len(points)
ks = 0.0
for x, y, ispoint in piter(points):
for (xmin, xmax) in ((0.0, x), (x, 1.0)):
for (ymin, ymax) in ((0.0, y), (y, 1.0)):
np = len(self.range_tree_query(tree, xmin, xmax,
ymin, ymax))
#rect = geometry.Polygon(((xmin, ymin), (xmax, ymin),
# (xmax, ymax), (xmin, ymax)))
#ps = rect.intersection(points)
#if isinstance(ps, geometry.Point):
# np = 1
#else:
# np = len(ps)
new_ks = numpy.abs(n * (xmax - xmin) * (ymax - ymin) - np)
ks = max(ks, new_ks)
# If x, y corresponds to an actual point location, the EDF
# has a jump here, and we should also check the other
# possible value.
if ispoint:
new_ks = numpy.abs(n * (xmax - xmin) * (ymax - ymin) -
(np - 1))
ks = max(ks, new_ks)
return ks / numpy.sqrt(n)
def kstatistic_interval(self, edge_correction=None):
"""
Compute an appropriate interval over which to evaluate the K test
statistic for this pattern
The interval is defined as [rmin, rmax], where rmax is the same as
for `PointPattern.lstatistic_interval`, and rmin is a third of the rmin
from `PointPattern.lstatistic_interval`.
Parameters
----------
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
rmin : scalar
The minimum end of the K test statistic interval
rmax : scalar
The maximum end of the K test statistic interval
"""
if edge_correction is None:
edge_correction = self.edge_correction
# rmin: the distance within which the expected number of pairs is
# greater than 0.5 under uniformity
window = self.window
sqi = self.squared_intensity()
rmin = numpy.sqrt(1.0 / (_PI * sqi * window.area))
# rmax: the maximum sensible distance
rmax_absolute = self.rmax(edge_correction=edge_correction)
if edge_correction == 'periodic':
rmax_standard = self.window.voronoi().inscribed_circle()['r']
else:
rmax_standard = self.window.inscribed_circle()['r']
rmax = min(rmax_standard, rmax_absolute)
return rmin, rmax
def lstatistic_interval(self, edge_correction=None):
"""
Compute an appropriate interval over which to evaluate the L test
statistic for this pattern
The interval is defined as [rmin, rmax], where rmax is the minimum of
the following two alternatives:
- the radius of the largest inscribed circle in the window of the point
pattern, as computed by `Window.inscribed_circle` (if using periodic
edge correction, the radius of the largest inscribed circle in the
Voronoi unit cell of the periodic lattice is used instead),
- the maximum relevant interpoint distance in the point pattern, as
computed by `PointPattern.rmax`.
The value of rmin is set to `1.8 / (intensity * sqrt(area))`, where
`area` is the area of the window of the point pattern, and `intensity`
is the standard intensity estimate of the point pattern.
Parameters
----------
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
Returns
-------
rmin : scalar
The minimum end of the L test statistic interval
rmax : scalar
The maximum end of the L test statistic interval
"""
if edge_correction is None:
edge_correction = self.edge_correction
rmin, rmax = self.kstatistic_interval(edge_correction=edge_correction)
# rmin: increase to 5 expected pairs within this distance
rmin *= numpy.sqrt(10.0)
return rmin, rmax
@memoize_method
def _simulate(self, nsims, process, edge_correction):
"""
Simulate a number of point processes in the same window, and of the
same intensity, as this pattern
This part of `PointPattern.simulate` is factored out to optimize
memoization.
"""
return PointPatternCollection.from_simulation(
nsims, self.window, self.intensity(), process=process,
edge_correction=edge_correction)
def simulate(self, nsims=100, process='binomial', edge_correction=None):
"""
Simulate a number of point processes in the same window, and of the
same intensity, as this pattern
Parameters
----------
nsims : int, optional
The number of point patterns to generate.
process : str {'binomial', 'poisson'}, optional
String to select the kind of process to simulate.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the default edge handling for the simulated
patterns. See the documentation for `PointPattern` for details. If
None, the default edge correction for this pattern is used.
Returns
-------
PointPatternCollection
Collection of the simulated patterns.
"""
if edge_correction is None:
edge_correction = self.edge_correction
return self._simulate(nsims, process, edge_correction)
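# Illustrative usage sketch, not part of the original module: a simple Monte
# Carlo comparison against simulated patterns of the same intensity. The
# number of simulations and the choice of the L statistic are arbitrary.
#
#     sims = pp.simulate(nsims=99, process='binomial')
#     observed = pp.lstatistic()
#     simulated = [sim.lstatistic() for sim in sims]
#     rank = sum(1 for s in simulated if s >= observed)
#     # A small rank (the observed statistic exceeds most simulated ones)
#     # speaks against complete spatial randomness.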
def plot_kfunction(self, axes=None, edge_correction=None, linewidth=2.0,
csr=False, csr_kw=None, **kwargs):
"""
Plot the empirical K-function for the pattern
Parameters
----------
axes : Axes, optional
Axes instance to add the K-function to. If None (default), the
current Axes instance is used if any, or a new one created.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
linewidth : scalar, optional
The width of the line showing the K-function.
csr : bool, optional
If True, overlay the curve :math:`K(r) = \pi r^2`, which is the
theoretical K-function for complete spatial randomness. The style
of this line may be customized using csr_kw.
csr_kw : dict, optional
Keyword arguments to pass to `axes.plot` when plotting the CSR
curve.
**kwargs : dict, optional
Additional keyword arguments to pass to `axes.plot`. Note in
particular the keywords 'linestyle', 'color' and 'label'.
Returns
-------
list
List of handles to the Line2D instances added to the plot, in the
following order: empirical K-function, CSR curve (optional).
"""
if axes is None:
axes = pyplot.gca()
if edge_correction is None:
edge_correction = self.edge_correction
rvals = self.rvals(edge_correction=edge_correction)
kvals = self.kfunction(rvals, edge_correction=edge_correction)
lines = axes.plot(rvals, kvals, linewidth=linewidth, **kwargs)
if csr:
if csr_kw is None:
csr_kw = {}
kcsr = _PI * rvals * rvals
lines += axes.plot(rvals, kcsr, **csr_kw)
return lines
def plot_lfunction(self, axes=None, edge_correction=None, linewidth=2.0,
csr=False, csr_kw=None, **kwargs):
"""
Plot the empirical L-function for the pattern
Parameters
----------
axes : Axes, optional
Axes instance to add the L-function to. If None (default), the
current Axes instance is used if any, or a new one created.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
linewidth : scalar, optional
The width of the line showing the L-function.
csr : bool, optional
If True, overlay the curve :math:`L(r) = r`, which is the
theoretical L-function for complete spatial randomness. The style
of this line may be customized using csr_kw.
csr_kw : dict, optional
Keyword arguments to pass to `axes.plot` when plotting the CSR
curve.
**kwargs : dict, optional
Additional keyword arguments to pass to `axes.plot`. Note in
particular the keywords 'linestyle', 'color' and 'label'.
Returns
-------
list
List of handles to the Line2D instances added to the plot, in the
following order: empirical L-function, CSR curve (optional).
"""
if axes is None:
axes = pyplot.gca()
if edge_correction is None:
edge_correction = self.edge_correction
rvals = self.rvals(edge_correction=edge_correction)
lvals = self.lfunction(rvals, edge_correction=edge_correction)
lines = axes.plot(rvals, lvals, linewidth=linewidth, **kwargs)
if csr:
if csr_kw is None:
csr_kw = {}
lines += axes.plot(rvals, rvals, **csr_kw)
return lines
def plot_pair_corr_function(self, axes=None, bandwidth=None,
edge_correction=None, linewidth=2.0, csr=False,
csr_kw=None, **kwargs):
"""
Plot the empirical pair correlation function for the pattern
Parameters
----------
axes : Axes, optional
Axes instance to add the K-function to. If None (default), the
current Axes instance is used if any, or a new one created.
bandwidth : scalar
The bandwidth of the box kernel used to estimate the density of
points pairs at a given distance. See the documentation for
`PointPattern.pair_corr_function` for details.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the edge handling to apply in computations. See
the documentation for `PointPattern` for details. If None, the
edge correction falls back to the default value (set at instance
initialization).
linewidth : scalar, optional
The width of the line showing the K-function.
csr : bool, optional
If True, overlay the curve :math:`g(r) = 1`, which is the
theoretical pair correlation function for complete spatial
randomness. The style of this line may be customized using csr_kw.
csr_kw : dict, optional
Keyword arguments to pass to `axes.plot` when plotting the CSR
curve.
**kwargs : dict, optional
Additional keyword arguments to pass to `axes.plot`. Note in
particular the keywords 'linestyle', 'color' and 'label'.
Returns
-------
list
List of handles to the Line2D instances added to the plot, in the
following order: empirical pair correlation function, CSR curve
(optional).
"""
if axes is None:
axes = pyplot.gca()
if edge_correction is None:
edge_correction = self.edge_correction
rmax = self.rmax(edge_correction=edge_correction)
rvals = numpy.linspace(0.0, rmax, RSAMPLES)
gvals = self.pair_corr_function(rvals, bandwidth=bandwidth,
edge_correction=edge_correction)
lines = axes.plot(rvals, gvals, linewidth=linewidth, **kwargs)
if csr:
if csr_kw is None:
csr_kw = {}
gcsr = numpy.ones_like(rvals)
lines += axes.plot(rvals, gcsr, **csr_kw)
return lines
def plot_pattern(self, axes=None, marker='o', periodic_levels=0,
plus=False, window=False, periodic_kw=None, plus_kw=None,
window_kw=None, **kwargs):
"""
Plot point pattern
The point pattern can be added to an existing plot via the optional
'axes' argument.
:axes: Axes instance to add the point pattern to. If None (default),
the current Axes instance with equal aspect ratio is used if
any, or a new one created.
:marker: a valid matplotlib marker specification. Defaults to 'o'
:periodic_levels: add this many levels of periodic extensions of the point
pattern to the plot. See `PointPattern.periodic_extension` for
further explanation. Defaults to 0.
:plus: if True, add plus sampling points to the plot.
:window: if True, the window boundaries are added to the plot.
:periodic_kw: dict of keyword arguments to pass to the axes.scatter()
method used to plot the periodic extension. Default: None
(empty dict)
:plus_kw: dict of keyword arguments to pass to the axes.scatter()
method used to plot the plus sampling points. Default: None
(empty dict)
:window_kw: dict of keyword arguments to pass to the Window.plot()
method. Default: None (empty dict)
:kwargs: additional keyword arguments passed on to axes.scatter()
method used to plot the point pattern. Note especially the
keywords 'c' (colors), 's' (marker sizes) and 'label'.
:returns: list of the artists added to the plot:
a matplotlib.collections.PathCollection instance for the
point pattern, and optionally another
matplotlib.collections.PathCollection instance for each of
the periodic extension and the plus sampling points, and
finally a matplotlib.patches.Polygon instance for the
window.
"""
if axes is None:
axes = pyplot.gca(aspect='equal')
cent = self.window.centroid
diag = self.window.longest_diagonal()
axes.set(xlim=(cent.x - diag, cent.x + diag),
ylim=(cent.y - diag, cent.y + diag))
pp = numpy.asarray(self._points)
h = [axes.scatter(pp[:, 0], pp[:, 1], marker=marker, **kwargs)]
if periodic_levels > 0:
if periodic_kw is None:
periodic_kw = {}
pp = numpy.asarray(self.periodic_extension(periodic_levels))
h.append(axes.scatter(pp[:, 0], pp[:, 1], marker=marker,
**periodic_kw))
if plus:
if plus_kw is None:
plus_kw = {}
pp = numpy.asarray(self.pluspoints)
h.append(axes.scatter(pp[:, 0], pp[:, 1], marker=marker,
**plus_kw))
if window:
if window_kw is None:
window_kw = {}
wpatch = self.window.plot(axes=axes, **window_kw)
h.append(wpatch)
return h
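# Illustrative usage sketch, not part of the original module: plotting a
# pattern with its window, then the empirical K-function with the CSR
# reference curve overlaid. `pp` is a hypothetical PointPattern.
#
#     from matplotlib import pyplot
#     pp.plot_pattern(window=True, s=10.0, label='points')
#     pyplot.figure()
#     pp.plot_kfunction(csr=True, csr_kw={'linestyle': 'dashed'})
#     pyplot.show()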
class PointPatternCollection(AlmostImmutable, Sequence):
"""
Represent a collection of planar point patterns defined in the same window,
and provide methods to compute statistics over them.
Parameters
----------
patterns : sequence
List of PointPattern instances to include in the collection.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the default edge handling to apply in computations.
See the documentation for `PointPattern` for details.
"""
def __init__(self, patterns, edge_correction='stationary'):
self.patterns = list(patterns)
self.edge_correction = edge_correction
@classmethod
def from_simulation(cls, nsims, window, intensity, process='binomial',
edge_correction='stationary'):
"""
Create a PointPatternCollection instance by simulating a number of
point patterns in the same window
Parameters
----------
nsims : integer
The number of point patterns to generate.
window : Window
Window instance to simulate the process within.
intensity : scalar
The intensity (density of points) of the process.
process : str {'binomial', 'poisson'}, optional
String to select the kind of process to simulate.
edge_correction : str {'stationary', 'finite', 'isotropic', 'periodic',
'plus'}, optional
String to select the default edge handling to apply in
computations. See the documentation for `PointPattern` for details.
Returns
-------
PointPatternCollection
Collection of the simulated processes
"""
xmin, ymin, xmax, ymax = window.bounds
area_factor = (xmax - xmin) * (ymax - ymin) / window.area
nmean = intensity * window.area
if process == 'poisson':
nlist = numpy.random.poisson(nmean, nsims)
elif process == 'binomial':
nlist = numpy.empty((nsims,), dtype=numpy.int_)
nlist.fill(round(nmean))
else:
raise ValueError("unknown point process: {}".format(process))
patterns = []
for n in nlist:
points = []
left = n
while left > 0:
ndraw = int(area_factor * left)
draw = numpy.column_stack(
(numpy.random.uniform(low=xmin, high=xmax, size=ndraw),
                     numpy.random.uniform(low=ymin, high=ymax, size=ndraw)))
from model.ddpg.actor import ActorNetwork
from model.ddpg.critic import CriticNetwork
from model.ddpg.ddpg import DDPG
from model.ddpg.ornstein_uhlenbeck import OrnsteinUhlenbeckActionNoise
import numpy as np
import tflearn
import tensorflow as tf
import json
from stock_model_train import StockActor, StockCritic, obs_normalizer,\
    test_model, get_variable_scope, test_model_multiple
from utils.clientutils import *
from model.supervised.lstm import StockLSTM
from model.supervised.cnn import StockCNN
from utils.data import read_stock_history, read_uptodate_data, fetch_data
from utils.modelutils import *
import copy
class ddpg_restore_model():
def __init__(self, train_id):
self.model_save_path = get_model_path(train_id)
self.summary_path = get_result_path(train_id)
self.config_path = "./train_package/" + str(train_id) + "/stock.json"
self.config = load_config(config_path = self.config_path)
self.batch_size = self.config["training"]["batch size"]
self.action_bound = self.config["training"]["action_bound"]
self.tau = self.config["training"]["tau"]
self.feature_number = self.config["input"]["feature_number"]
self.window_length = self.config["input"]["window_length"]
self.predictor_type = self.config["input"]["predictor_type"]
self.use_batch_norm = self.config["input"]["use_batch_norm"]
self.testing_stocks = self.config["input"]["stocks"]
self.trading_cost = self.config["input"]["trading_cost"]
self.time_cost = self.config["input"]["time_cost"]
self.testing_start_time = self.config["testing"]["testing_start_time"]
self.testing_end_time = self.config["testing"]["testing_end_time"]
self.activation_function = self.config["layers"]["activation_function"]
self.train_id = train_id
def restore(self):
self.start_session()
nb_classes = len(self.testing_stocks) + 1
action_dim, state_dim = [nb_classes], [nb_classes, self.window_length]
variable_scope = get_variable_scope(self.window_length, self.predictor_type, self.use_batch_norm)
with tf.variable_scope(variable_scope):
actor = StockActor(sess = self.sess,
feature_number = self.feature_number,
state_dim = state_dim,
action_dim = action_dim,
action_bound = self.action_bound,
learning_rate = self.config["training"]["actor learning rate"],
decay_rate = self.config["training"]["actor decay rate"],
decay_steps = self.config["training"]["actor decay steps"],
weight_decay = self.config["training"]["actor weight decay"],
tau = self.tau,
batch_size = self.batch_size,
predictor_type = self.predictor_type,
use_batch_norm = self.use_batch_norm,
activation_function = self.activation_function)
critic = StockCritic(sess = self.sess,
feature_number = self.feature_number,
state_dim = state_dim,
action_dim = action_dim,
tau = self.tau,
learning_rate = self.config["training"]["critic learning rate"],
decay_rate = self.config["training"]["critic decay rate"],
decay_steps = self.config["training"]["critic decay steps"],
weight_decay = self.config["training"]["critic weight decay"],
num_actor_vars = actor.get_num_trainable_vars(),
predictor_type = self.predictor_type,
use_batch_norm = self.use_batch_norm,
activation_function = self.activation_function)
actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))
print(self.model_save_path)
ddpg_model = DDPG(env = None,
sess = self.sess,
actor = actor,
critic = critic,
actor_noise = actor_noise,
obs_normalizer = obs_normalizer,
config =self.config,
model_save_path = self.model_save_path,
summary_path = self.summary_path)
ddpg_model.initialize(load_weights=True, verbose=False)
self.model = ddpg_model
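# Added usage sketch (not part of the original class): a typical call
# sequence, assuming `train_id` points to an existing train_package directory
# produced by the training script; the date strings are illustrative and must
# match whatever format fetch_data() expects.
#
#   restorer = ddpg_restore_model(train_id=0)
#   restorer.restore()
#   restorer.backtest(start_date="2019-01-01", end_date="2019-06-30")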
def predict_single(self, model, observation, previous_w):
return model.predict_single(observation, previous_w, self.feature_number)
# def fetch_data(self, testing_start_time, testing_end_time, window_length, testing_stocks):
# history, time_list = read_uptodate_data(testing_stocks, rootpath = "utils")
# total_length = len(time_list)
# try:
# if testing_start_time in time_list:
# self.testing_start_time = total_length - time_list.index(testing_start_time)
# else:
# for time in time_list:
# if testing_start_time < time:
# self.testing_start_time = total_length - time_list.index(time)
# break;
# assert isinstance(self.testing_start_time, int), "not a valid testing start date"
# except ValueError as e:
# print('testing_start_time error', e)
# try:
# if testing_end_time in time_list:
# self.testing_end_time = total_length - time_list.index(testing_end_time)
# else:
# for i,time in enumerate(time_list):
# if (i != len(time_list) - 1 and testing_end_time > time and testing_end_time < time_list[i+1]) or \
# i == len(time_list) - 1:
# self.testing_end_time = total_length - time_list.index(time)
# break;
# assert isinstance(self.testing_start_time, int), "not a valid testing end date"
# except ValueError as e:
# print('testing_end_time error', e)
# # print(self.training_end_time, self.training_start_time)
# assert self.testing_end_time < self.testing_start_time, "testing start date must be earlier than training end date"
# if self.testing_end_time == 1:
# print("The testing start date and end date1 are from %s to %s" %(time_list[-self.testing_start_time],\
# time_list[-1]))
# return history[:,-self.testing_start_time - window_length:,:],\
# time_list[-self.testing_start_time:]
# else:
# print("The testing start date and end date2 are from %s to %s" %(time_list[-self.testing_start_time],\
# time_list[-self.testing_end_time]))
# return history[:,-self.testing_start_time - window_length: -self.testing_end_time,:], \
# time_list[-self.testing_start_time: -self.testing_end_time]
def backtest(self, start_date=None, end_date=None):
if start_date is not None:
self.testing_start_time = start_date
if end_date is not None:
self.testing_end_time = end_date
print("bactesting period is from %s to %s"%(self.testing_start_time,self.testing_end_time))
nb_classes = len(self.testing_stocks) + 1
testing_history, self.time_list, self.testing_start_time, self.testing_end_time \
= fetch_data(start_time = self.testing_start_time,
end_time = self.testing_end_time,
window_length = self.window_length,
stocks = self.testing_stocks)
# print("this is testing_history")
# print(testing_history)
# print("teststage time list is ", time_list)
# print(self.testing_start_time, self.testing_end_time)
print("total testing example is %d" %(self.testing_start_time-self.testing_end_time))
prev_pv = 1
prev_action = np.zeros(shape=(1,nb_classes))
prev_action[0][0] = 1
action_list = []
share_list, share_change_list = [], []
pv_list, actualPV_list = [], []
price_list =[]
mu1_list = []
#add a W' change list so we could see the change in weight every day
unadjust_weight_list = []
for step in range(-self.testing_end_time +self.testing_start_time):
observation = testing_history[:, step:step + self.window_length, :].copy()
cash_observation = np.ones((1, self.window_length, observation.shape[2]))
observation = np.concatenate((cash_observation, observation), axis=0)
action = self.predict_single(self.model, observation, prev_action)
date = step + self.window_length
today_price = testing_history[:, date-1, 4]
prev_price = testing_history[:, date-2, 4]
cash_price = np.ones((1,))
today_price = np.concatenate((cash_price, today_price), axis=0)
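# --- Added illustration (not part of the original method) ---
# The truncated loop above asks the actor for new weights and compares today's
# prices against yesterday's (with a constant cash price prepended). A common
# way to roll the portfolio value forward from those quantities is sketched
# below; the simplified transaction-cost factor `mu` is an assumption, not the
# formula used by the original project.
def _portfolio_step(prev_pv, prev_action, action, today_price, prev_price,
                    trading_cost=0.0025):
    y = today_price / prev_price                       # price-relative vector, cash first
    turnover = np.abs(np.asarray(action) - np.asarray(prev_action)).sum()
    mu = 1.0 - trading_cost * turnover                 # simplified cost factor (assumption)
    return prev_pv * mu * float(np.dot(np.ravel(action), y))

# Example with cash plus two assets:
#   _portfolio_step(1.0, np.array([1., 0., 0.]), np.array([0.5, 0.3, 0.2]),
#                   np.array([1., 101., 52.]), np.array([1., 100., 50.]))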
from typing import TypeVar
import numpy as np
import xarray as xr
PreserveFloatRangeType = TypeVar("PreserveFloatRangeType", xr.DataArray, np.ndarray)
LevelsType = TypeVar("LevelsType", xr.DataArray, np.ndarray)
def preserve_float_range(
array: PreserveFloatRangeType,
rescale: bool = False,
preserve_input: bool = False,
) -> PreserveFloatRangeType:
"""
Clips values below zero to zero. If values above one are detected, clips them
to 1 unless `rescale` is True, in which case the input is scaled by
the max value and the linearity of the dynamic range is preserved.
Input arrays may be modified by this operation if `preserve_input` is False. There is no
guarantee that the input array is returned even if `preserve_input` is True, however.
Parameters
----------
array : Union[xr.DataArray, np.ndarray]
Array whose values should be in the interval [0, 1] but may not be.
rescale : bool
If true, scale values by the max.
preserve_input : bool
If True, ensure that we do not modify the input data. This may either be done by making a
copy of the input data or ensuring that the operation does not modify the input array.
Even if `preserve_input` is True, modifications to the resulting array may modify the input
array.
Returns
-------
array : Union[xr.DataArray, np.ndarray]
Array whose values are in the interval [0, 1].
"""
return levels(array, rescale=False, rescale_saturated=rescale, preserve_input=preserve_input)
def levels(
array: LevelsType,
rescale: bool = False,
rescale_saturated: bool = False,
preserve_input: bool = False,
) -> LevelsType:
"""
Clips values below zero to zero. If values above one are detected, clip them to 1 if both
``rescale`` and ``rescale_saturated`` are False. If ``rescale`` is True, then the input is
rescaled by the peak intensity. If ``rescale_saturated`` is True, then the image is rescaled
by the peak intensity provided the peak intensity is saturated. It is illegal for both
``rescale`` and ``rescale_saturated`` to be True.
Input arrays may be modified by this operation if `preserve_input` is False. There is no
guarantee that the input array is returned even if `preserve_input` is True, however.
Parameters
----------
array : Union[xr.DataArray, np.ndarray]
Array whose values should be in the interval [0, 1] but may not be.
rescale : bool
If true, scale values by the max.
rescale_saturated : bool
If true, scale values by the max if the max is saturated (i.e., >= 1).
preserve_input : bool
If True, ensure that we do not modify the input data. This may either be done by making a
copy of the input data or ensuring that the operation does not modify the input array.
Even if `preserve_input` is True, modifications to the resulting array may modify the input
array.
Returns
-------
array : Union[xr.DataArray, np.ndarray]
Array whose values are in the interval [0, 1].
"""
if rescale and rescale_saturated:
raise ValueError("rescale and rescale_saturated cannot both be set.")
if isinstance(array, xr.DataArray):
# do a shallow copy
array = array.copy(deep=False)
data = array.values
else:
data = array
casted = data.astype(np.float32, copy=False)
if casted is not data:
preserve_input = False
data = casted
if preserve_input or not data.flags['WRITEABLE']:
# if we still want a copy, check to see if any modifications would be made. if so, make a
# copy.
belowzero = np.any(data < 0)
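# --- Added illustration (not part of the original module) ---
# A small, self-contained demonstration of the behaviour documented above; the
# expected outputs are stated as an assumption since the remainder of levels()
# is cut off in this excerpt.
def _preserve_float_range_example():
    example = np.array([-0.2, 0.5, 1.4], dtype=np.float32)
    clipped = preserve_float_range(example.copy())                 # -> [0., 0.5, 1.]
    rescaled = preserve_float_range(example.copy(), rescale=True)  # -> [0., 0.5/1.4, 1.]
    return clipped, rescaled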
"""
gui/average3
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for three-dimensional averaging of particles
:author: <NAME>, 2017-2018
:copyright: Copyright (c) 2017-2018 Jungmann Lab, MPI of Biochemistry
"""
import os.path
import sys
import traceback
import colorsys
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy
from scipy import signal
from PyQt5 import QtCore, QtGui, QtWidgets
from .. import io, lib, render
from numpy.lib.recfunctions import stack_arrays
from cmath import rect, phase
from tqdm import tqdm
import scipy.ndimage.filters
DEFAULT_OVERSAMPLING = 1.0
INITIAL_REL_MAXIMUM = 2.0
ZOOM = 10 / 7
N_GROUP_COLORS = 8
@numba.jit(nopython=True, nogil=True)
def render_hist(x, y, oversampling, t_min, t_max):
n_pixel = int(np.ceil(oversampling * (t_max - t_min)))
in_view = (x > t_min) & (y > t_min) & (x < t_max) & (y < t_max)
x = x[in_view]
y = y[in_view]
x = oversampling * (x - t_min)
y = oversampling * (y - t_min)
image = np.zeros((n_pixel, n_pixel), dtype=np.float32)
render._fill(image, x, y)
return len(x), image
@numba.jit(nopython=True, nogil=True)
def render_histxyz(a, b, oversampling, a_min, a_max, b_min, b_max):
n_pixel_a = int(np.ceil(oversampling * (a_max - a_min)))
n_pixel_b = int(np.ceil(oversampling * (b_max - b_min)))
in_view = (a > a_min) & (b > b_min) & (a < a_max) & (b < b_max)
a = a[in_view]
b = b[in_view]
a = oversampling * (a - a_min)
b = oversampling * (b - b_min)
image = np.zeros((n_pixel_b, n_pixel_a), dtype=np.float32)
render._fill(image, a, b)
return len(a), image
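# Added note: both render helpers above simply bin coordinates into a 2D
# histogram whose pixel pitch is 1/oversampling of the input units, so larger
# oversampling values give finer (but sparser) renderings.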
def rotate_axis(axis, vx, vy, vz, angle, pixelsize):
if axis == "z":
vx_rot = np.cos(angle) * vx - np.sin(angle) * vy
vy_rot = np.sin(angle) * vx + np.cos(angle) * vy
vz_rot = vz
elif axis == "y":
vx_rot = np.cos(angle) * vx + np.sin(angle) * np.divide(vz, pixelsize)
vy_rot = vy
vz_rot = -np.sin(angle) * vx * pixelsize + np.cos(angle) * vz
elif axis == "x":
vx_rot = vx
vy_rot = np.cos(angle) * vy - np.sin(angle) * np.divide(vz, pixelsize)
vz_rot = np.sin(angle) * vy * pixelsize + np.cos(angle) * vz
return vx_rot, vy_rot, vz_rot
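# Added illustration (not part of the original module): a quarter turn about
# the z axis with the helper above; pixelsize only matters for the x/y axes,
# where z is stored in nm rather than pixels.
def _rotate_axis_demo():
    vx, vy, vz = np.array([1.0]), np.array([0.0]), np.array([0.0])
    return rotate_axis("z", vx, vy, vz, np.pi / 2, 130)
    # -> (~[0.0], ~[1.0], [0.0])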
def compute_xcorr(CF_image_avg, image):
F_image = np.fft.fft2(image)
xcorr = np.fft.fftshift(np.real(np.fft.ifft2((F_image * CF_image_avg))))
return xcorr
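# Added illustration (not part of the original module): the peak of
# compute_xcorr() encodes the shift between an image and the reference whose
# conjugated FFT was passed in, which is how align_group() below recovers the
# per-group translation.
def _xcorr_shift_demo():
    ref = np.zeros((16, 16)); ref[8, 8] = 1.0
    img = np.zeros((16, 16)); img[10, 5] = 1.0          # ref shifted by (+2, -3)
    cf = np.conj(np.fft.fft2(ref))
    xcorr = compute_xcorr(cf, img)
    return np.unravel_index(np.argmax(xcorr), xcorr.shape)  # -> (10, 5): centre + shift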
class ParametersDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Parameters")
self.setModal(False)
grid = QtWidgets.QGridLayout(self)
grid.addWidget(QtWidgets.QLabel("Oversampling:"), 0, 0)
self.oversampling = QtWidgets.QDoubleSpinBox()
self.oversampling.setRange(1, 200)
self.oversampling.setValue(DEFAULT_OVERSAMPLING)
self.oversampling.setDecimals(1)
self.oversampling.setKeyboardTracking(False)
self.oversampling.valueChanged.connect(self.window.updateLayout)
grid.addWidget(self.oversampling, 0, 1)
self.iterations = QtWidgets.QSpinBox()
self.iterations.setRange(1, 1)
self.iterations.setValue(1)
class View(QtWidgets.QLabel):
def __init__(self, window):
super().__init__()
self.window = window
self.setMinimumSize(1, 1)
self.setAlignment(QtCore.Qt.AlignCenter)
self.setAcceptDrops(True)
self._pixmap = None
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
self.open(path)
def resizeEvent(self, event):
if self._pixmap is not None:
self.set_pixmap(self._pixmap)
def set_image(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
self._pixmap = QtGui.QPixmap.fromImage(qimage)
self.set_pixmap(self._pixmap)
def set_pixmap(self, pixmap):
self.setPixmap(
pixmap.scaled(
self.width(),
self.height(),
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.FastTransformation,
)
)
def update_image(self, *args):
oversampling = self.window.parameters_dialog.oversampling.value()
t_min = -self.r
t_max = self.r
N_avg, image_avg = render.render_hist(
self.locs, oversampling, t_min, t_min, t_max, t_max
)
self.set_image(image_avg)
class DatasetDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Datasets")
self.setModal(False)
self.layout = QtWidgets.QVBoxLayout()
self.checks = []
self.setLayout(self.layout)
def add_entry(self, path):
c = QtWidgets.QCheckBox(path)
self.layout.addWidget(c)
self.checks.append(c)
self.checks[-1].setChecked(True)
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Picasso: Average3")
self.resize(1024, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "average.ico")
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setAcceptDrops(True)
self.parameters_dialog = ParametersDialog(self)
self.dataset_dialog = DatasetDialog(self)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu("File")
open_action = file_menu.addAction("Open")
open_action.setShortcut(QtGui.QKeySequence.Open)
open_action.triggered.connect(self.open)
file_menu.addAction(open_action)
save_action = file_menu.addAction("Save")
save_action.setShortcut(QtGui.QKeySequence.Save)
save_action.triggered.connect(self.save)
file_menu.addAction(save_action)
process_menu = menu_bar.addMenu("Process")
parameters_action = process_menu.addAction("Parameters")
parameters_action.setShortcut("Ctrl+P")
parameters_action.triggered.connect(self.parameters_dialog.show)
dataset_action = process_menu.addAction("Datasets")
dataset_action.triggered.connect(self.dataset_dialog.show)
self.status_bar = self.statusBar()
self._pixmap = None
self.locs = []
self.z_state = []
self.group_index = []
self.infos = []
self.locs_paths = []
self._mode = "Zoom"
self._pan = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self.index_blocks = []
self._drift = []
# Define DisplaySettingsDialog
self.viewxy = QtWidgets.QLabel("")
self.viewxz = QtWidgets.QLabel("")
self.viewyz = QtWidgets.QLabel("")
self.viewcp = QtWidgets.QLabel("")
minsize = 512
self.viewxy.setFixedWidth(minsize)
self.viewxy.setFixedHeight(minsize)
self.viewxz.setFixedWidth(minsize)
self.viewxz.setFixedHeight(minsize)
self.viewyz.setFixedWidth(minsize)
self.viewyz.setFixedHeight(minsize)
self.viewcp.setFixedWidth(minsize)
self.viewcp.setFixedHeight(minsize)
# Define layout
display_groupbox = QtWidgets.QGroupBox("Display")
displaygrid = QtWidgets.QGridLayout(display_groupbox)
displaygrid.addWidget(QtWidgets.QLabel("XY"), 0, 0)
displaygrid.addWidget(self.viewxy, 1, 0)
displaygrid.addWidget(QtWidgets.QLabel("XZ"), 0, 1)
displaygrid.addWidget(self.viewxz, 1, 1)
displaygrid.addWidget(QtWidgets.QLabel("YZ"), 2, 0)
displaygrid.addWidget(self.viewyz, 3, 0)
displaygrid.addWidget(QtWidgets.QLabel("CP"), 2, 1)
displaygrid.addWidget(self.viewcp, 3, 1)
button_groupbox = QtWidgets.QGroupBox("Buttons")
buttongrid = QtWidgets.QGridLayout(button_groupbox)
rotation_groupbox = QtWidgets.QGroupBox("Rotation + Translation")
rotationgrid = QtWidgets.QGridLayout(rotation_groupbox)
centerofmassbtn = QtWidgets.QPushButton("Center of Mass XYZ")
axis_groupbox = QtWidgets.QGroupBox("Axis")
axisgrid = QtWidgets.QGridLayout(axis_groupbox)
self.x_axisbtn = QtWidgets.QRadioButton("X")
self.y_axisbtn = QtWidgets.QRadioButton("Y")
self.z_axisbtn = QtWidgets.QRadioButton("Z")
self.z_axisbtn.setChecked(True)
axisgrid.addWidget(self.x_axisbtn, 0, 0)
axisgrid.addWidget(self.y_axisbtn, 0, 1)
axisgrid.addWidget(self.z_axisbtn, 0, 2)
proj_groupbox = QtWidgets.QGroupBox("Projection")
projgrid = QtWidgets.QGridLayout(proj_groupbox)
self.xy_projbtn = QtWidgets.QRadioButton("XY")
self.yz_projbtn = QtWidgets.QRadioButton("YZ")
self.xz_projbtn = QtWidgets.QRadioButton("XZ")
self.xy_projbtn.setChecked(True)
projgrid.addWidget(self.xy_projbtn, 0, 0)
projgrid.addWidget(self.yz_projbtn, 0, 1)
projgrid.addWidget(self.xz_projbtn, 0, 2)
rotatebtn = QtWidgets.QPushButton("Rotate")
self.radio_sym = QtWidgets.QRadioButton("x symmetry")
self.symEdit = QtWidgets.QSpinBox()
self.symEdit.setRange(2, 100)
self.symEdit.setValue(8)
self.radio_sym_custom = QtWidgets.QRadioButton("custom symmetry")
self.symcustomEdit = QtWidgets.QLineEdit("90,180,270")
deg_groupbox = QtWidgets.QGroupBox("Degrees")
deggrid = QtWidgets.QGridLayout(deg_groupbox)
self.full_degbtn = QtWidgets.QRadioButton("Full")
self.part_degbtn = QtWidgets.QRadioButton("Part")
self.degEdit = QtWidgets.QSpinBox()
self.degEdit.setRange(1, 10)
self.degEdit.setValue(5)
deggrid.addWidget(self.full_degbtn, 0, 0)
deggrid.addWidget(self.part_degbtn, 0, 1)
deggrid.addWidget(self.degEdit, 0, 2)
self.full_degbtn.setChecked(True)
# Rotation Groupbox
rotationgrid.addWidget(axis_groupbox, 0, 0, 1, 2)
rotationgrid.addWidget(proj_groupbox, 1, 0, 1, 2)
rotationgrid.addWidget(deg_groupbox, 2, 0, 1, 2)
rotationgrid.addWidget(rotatebtn, 3, 0, 1, 2)
rotationgrid.addWidget(self.symEdit, 4, 0)
rotationgrid.addWidget(self.radio_sym, 4, 1)
rotationgrid.addWidget(self.radio_sym_custom, 5, 0)
rotationgrid.addWidget(self.symcustomEdit, 5, 1)
buttongrid.addWidget(centerofmassbtn, 0, 0)
buttongrid.addWidget(rotation_groupbox, 1, 0)
centerofmassbtn.clicked.connect(self.centerofmass)
rotatebtn.clicked.connect(self.rotate_groups)
self.translatebtn = QtWidgets.QCheckBox("Translate only")
self.flipbtn = QtWidgets.QCheckBox("Consider flipped structures")
self.alignxbtn = QtWidgets.QPushButton("Align X")
self.alignybtn = QtWidgets.QPushButton("Align Y")
self.alignzzbtn = QtWidgets.QPushButton("Align Z_Z")
self.alignzybtn = QtWidgets.QPushButton("Align Z_Y")
self.translatexbtn = QtWidgets.QPushButton("Translate X")
self.translateybtn = QtWidgets.QPushButton("Translate Y")
self.translatezbtn = QtWidgets.QPushButton("Translate Z")
self.rotatexy_convbtn = QtWidgets.QPushButton("Rotate XY - Convolution")
self.scorebtn = QtWidgets.QPushButton("Calculate Score")
operate_groupbox = QtWidgets.QGroupBox("Operate")
operategrid = QtWidgets.QGridLayout(operate_groupbox)
rotationgrid.addWidget(self.translatebtn, 7, 0)
rotationgrid.addWidget(self.flipbtn, 8, 0)
self.x_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("x-Range (Px)"), 9, 0)
rotationgrid.addWidget(self.x_range, 9, 1)
self.y_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("y-Range (Px)"), 10, 0)
rotationgrid.addWidget(self.y_range, 10, 1)
self.z_range = QtWidgets.QLineEdit("-1000,1000")
rotationgrid.addWidget(QtWidgets.QLabel("z-Range (nm)"), 11, 0)
rotationgrid.addWidget(self.z_range, 11, 1)
self.z_range.textChanged.connect(self.adjust_z)
self.x_range.textChanged.connect(self.adjust_xy)
self.y_range.textChanged.connect(self.adjust_xy)
operategrid.addWidget(self.alignxbtn, 0, 1)
operategrid.addWidget(self.alignybtn, 1, 1)
operategrid.addWidget(self.alignzzbtn, 2, 1)
operategrid.addWidget(self.alignzybtn, 3, 1)
operategrid.addWidget(self.translatexbtn, 0, 0)
operategrid.addWidget(self.translateybtn, 1, 0)
operategrid.addWidget(self.translatezbtn, 2, 0)
operategrid.addWidget(self.rotatexy_convbtn, 4, 0)
operategrid.addWidget(self.scorebtn, 4, 1)
self.rotatexy_convbtn.clicked.connect(self.rotatexy_convolution)
self.alignxbtn.clicked.connect(self.align_x)
self.alignybtn.clicked.connect(self.align_y)
self.alignzzbtn.clicked.connect(self.align_zz)
self.alignzybtn.clicked.connect(self.align_zy)
self.translatexbtn.clicked.connect(self.translate_x)
self.translateybtn.clicked.connect(self.translate_y)
self.translatezbtn.clicked.connect(self.translate_z)
self.scorebtn.clicked.connect(self.calculate_score)
buttongrid.addWidget(operate_groupbox, 2, 0)
self.contrastEdit = QtWidgets.QDoubleSpinBox()
self.contrastEdit.setDecimals(1)
self.contrastEdit.setRange(0, 10)
self.contrastEdit.setValue(0.5)
self.contrastEdit.setSingleStep(0.1)
self.contrastEdit.valueChanged.connect(self.updateLayout)
self.grid = QtWidgets.QGridLayout()
self.grid.addWidget(display_groupbox, 0, 0, 2, 1)
self.grid.addWidget(button_groupbox, 0, 1, 1, 1)
contrast_groupbox = QtWidgets.QGroupBox("Contrast")
contrastgrid = QtWidgets.QGridLayout(contrast_groupbox)
contrastgrid.addWidget(self.contrastEdit)
buttongrid.addWidget(contrast_groupbox)
MODEL_X_DEFAULT = "0,20,40,60,0,20,40,60,0,20,40,60"
MODEL_Y_DEFAULT = "0,20,40,0,20,40,0,20,40,0,20,40"
MODEL_Z_DEFAULT = "0,0,0,0,0,0,0,0,0,0,0,0"
self.modelchk = QtWidgets.QCheckBox("Use Model")
self.model_x = QtWidgets.QLineEdit(MODEL_X_DEFAULT)
self.model_y = QtWidgets.QLineEdit(MODEL_Y_DEFAULT)
self.model_z = QtWidgets.QLineEdit(MODEL_Z_DEFAULT)
self.model_preview_btn = QtWidgets.QPushButton("Preview")
self.model_preview_btn.clicked.connect(self.model_preview)
self.modelblurEdit = QtWidgets.QDoubleSpinBox()
self.modelblurEdit.setDecimals(1)
self.modelblurEdit.setRange(0, 10)
self.modelblurEdit.setValue(0.5)
self.modelblurEdit.setSingleStep(0.1)
self.pixelsizeEdit = QtWidgets.QSpinBox()
self.pixelsizeEdit.setRange(1, 999)
self.pixelsizeEdit.setValue(130)
model_groupbox = QtWidgets.QGroupBox("Model")
modelgrid = QtWidgets.QGridLayout(model_groupbox)
modelgrid.addWidget(self.modelchk, 0, 0)
modelgrid.addWidget(QtWidgets.QLabel("X-Coordinates"), 1, 0)
modelgrid.addWidget(self.model_x, 1, 1)
modelgrid.addWidget(QtWidgets.QLabel("Y-Coordinates"), 2, 0)
modelgrid.addWidget(self.model_y, 2, 1)
modelgrid.addWidget(QtWidgets.QLabel("Z-Coordinates"), 3, 0)
modelgrid.addWidget(self.model_z, 3, 1)
modelgrid.addWidget(QtWidgets.QLabel("Blur:"), 4, 0)
modelgrid.addWidget(self.modelblurEdit, 4, 1)
modelgrid.addWidget(QtWidgets.QLabel("Pixelsize:"), 5, 0)
modelgrid.addWidget(self.pixelsizeEdit, 5, 1)
modelgrid.addWidget(self.model_preview_btn, 6, 0)
modelgrid.addWidget(self.modelchk, 6, 1)
buttongrid.addWidget(model_groupbox)
mainWidget = QtWidgets.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.status_bar.showMessage("Average3 ready.")
def open(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open localizations", filter="*.hdf5"
)
if path:
self.add(path)
def save(self, path):
n_channels = len(self.locs)
for i in range(n_channels):
cx = self.infos[i][0]["Width"] / 2
cy = self.infos[i][0]["Height"] / 2
out_locs = self.locs[i].copy()
out_locs.x += cx
out_locs.y += cy
info = self.infos[i] + [{"Generated by": "Picasso Average3"}]
if not self.z_state[i]:
out_locs = lib.remove_from_rec(out_locs, "z")
out_path = os.path.splitext(self.locs_paths[i])[0] + "_avg3.hdf5"
path, exe = QtWidgets.QFileDialog.getSaveFileName(
self, "Save localizations", out_path, filter="*.hdf5"
)
io.save_locs(path, out_locs, info)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
print("Opening {} ..".format(path))
self.add(path)
def add(self, path, rendermode=True):
try:
locs, info = io.load_locs(path, qt_parent=self)
except io.NoMetadataFileError:
return
if len(self.locs) == 0:
self.pixelsize = 0
if not hasattr(locs, "group"):
msgBox = QtWidgets.QMessageBox(self)
msgBox.setWindowTitle("Error")
msgBox.setText(
("Datafile does not contain group information."
" Please load file with picked localizations.")
)
msgBox.exec_()
else:
locs = lib.ensure_sanity(locs, info)
if not hasattr(locs, "z"):
locs = lib.append_to_rec(locs, locs.x.copy(), "z")
self.pixelsize = 1
has_z = False
else:
has_z = True
if self.pixelsize == 0:
pixelsize, ok = QtWidgets.QInputDialog.getInt(
self,
"Pixelsize Dialog",
"Please enter the pixelsize in nm",
130,
)
if ok:
self.pixelsize = pixelsize
else:
self.pixelsize = 130
self.locs.append(locs)
self.z_state.append(has_z)
self.infos.append(info)
self.locs_paths.append(path)
self.index_blocks.append(None)
self._drift.append(None)
self.dataset_dialog.add_entry(path)
self.dataset_dialog.checks[-1].stateChanged.connect(
self.updateLayout
)
cx = self.infos[-1][0]["Width"] / 2
cy = self.infos[-1][0]["Height"] / 2
self.locs[-1].x -= cx
self.locs[-1].y -= cy
if len(self.locs) == 1:
self.median_lp = np.mean(
[np.median(locs.lpx), np.median(locs.lpy)]
)
if hasattr(locs, "group"):
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[groupcopy]
if rendermode:
self.fit_in_view(autoscale=True)
else:
if rendermode:
self.update_scene()
self.oversampling = 1
if len(self.locs) == 1:
self.t_min = np.min([np.min(locs.x), np.min(locs.y)])
self.t_max = np.max([np.max(locs.x), np.max(locs.y)])
self.z_min = np.min(locs.z)
self.z_max = np.max(locs.z)
else:
self.t_min = np.min(
[np.min(locs.x), np.min(locs.y), self.t_min]
)
self.t_max = np.max(
[np.max(locs.x), np.max(locs.y), self.t_max]
)
self.z_min = np.min([np.min(locs.z), self.z_min])
self.z_max = np.max([np.max(locs.z), self.z_max])
if len(self.locs) == 1:
print("Dataset loaded from {}.".format(path))
else:
print(
("Dataset loaded from {},"
" Total number of datasets {}.").format(
path, len(self.locs)
)
)
# CREATE GROUP INDEX
if hasattr(locs, "group"):
groups = np.unique(locs.group)
n_groups = len(groups)
n_locs = len(locs)
group_index = scipy.sparse.lil_matrix(
(n_groups, n_locs), dtype=bool
)
progress = lib.ProgressDialog(
"Creating group index", 0, len(groups), self
)
progress.set_value(0)
for i, group in enumerate(groups):
index = np.where(locs.group == group)[0]
group_index[i, index] = True
progress.set_value(i + 1)
self.group_index.append(group_index)
self.n_groups = n_groups
os.chdir(os.path.dirname(path))
self.calculate_radii()
self.oversampling = 4
self.updateLayout()
def updateLayout(self):
if len(self.locs) > 0:
pixmap1, pixmap2, pixmap3 = self.hist_multi_channel(self.locs)
self.viewxy.setPixmap(pixmap1)
self.viewxz.setPixmap(pixmap2)
self.viewyz.setPixmap(pixmap3)
def centerofmass_all(self):
# Align all by center of mass
n_channels = len(self.locs)
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
# stack arrays
sel_locs_x = self.locs[j].x
sel_locs_y = self.locs[j].y
sel_locs_z = self.locs[j].z
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
self.locs[j].x -= mean_x
self.locs[j].y -= mean_y
self.locs[j].z -= mean_z
def calculate_radii(self):
# CALCULATE PROPER R VALUES
n_channels = len(self.locs)
self.r = 0
self.r_z = 0
for j in range(n_channels):
self.r = np.max(
[
3
* np.sqrt(
np.mean(self.locs[j].x ** 2 + self.locs[j].y ** 2)
),
self.r,
]
)
self.r_z = np.max(
[5 * np.sqrt(np.mean(self.locs[j].z ** 2)), self.r_z]
)
self.t_min = -self.r
self.t_max = self.r
self.z_min = -self.r_z
self.z_max = self.r_z
self.z_min_load = self.z_min.copy()
self.z_max_load = self.z_max.copy()
def centerofmass(self):
print("Aligning by center of mass.. ", end="", flush=True)
n_groups = self.n_groups
n_channels = len(self.locs)
progress = lib.ProgressDialog(
"Aligning by center of mass", 0, n_groups, self
)
progress.set_value(0)
for i in range(n_groups):
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
index = self.group_index[j][i, :].nonzero()[1]
# stack arrays
sel_locs_x = self.locs[j].x[index]
sel_locs_y = self.locs[j].y[index]
sel_locs_z = self.locs[j].z[index]
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
progress.set_value(i + 1)
out_locs_x = stack_arrays(
out_locs_x, asrecarray=True, usemask=False
)
out_locs_y = stack_arrays(
out_locs_y, asrecarray=True, usemask=False
)
out_locs_z = stack_arrays(
out_locs_z, asrecarray=True, usemask=False
)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
index = self.group_index[j][i, :].nonzero()[1]
self.locs[j].x[index] -= mean_x
self.locs[j].y[index] -= mean_y
self.locs[j].z[index] -= mean_z
self.calculate_radii()
self.updateLayout()
print("Complete.")
def histtoImage(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
np.round(self.viewxy.height() * Y / X),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def hist_multi_channel(self, locs):
oversampling = self.parameters_dialog.oversampling.value()
self.oversampling = oversampling
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
renderings = []
for i in range(n_channels):
if self.dataset_dialog.checks[i].isChecked():
renderings.append(
render.render_hist3d(
locs[i],
oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
)
images = np.array([_[1] for _ in renderings])
pixmap1 = self.pixmap_from_colors(images, colors, 2)
pixmap2 = self.pixmap_from_colors(images, colors, 0)
pixmap3 = self.pixmap_from_colors(images, colors, 1)
return pixmap1, pixmap2, pixmap3
def pixmap_from_colors(self, images, colors, axisval):
if axisval == 2:
image = [np.sum(_, axis=axisval) for _ in images]
else:
image = [np.transpose(np.sum(_, axis=axisval)) for _ in images]
image = np.array([self.scale_contrast(_) for _ in image])
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
for color, image in zip(colors, image):
bgra[:, :, 0] += color[2] * image
bgra[:, :, 1] += color[1] * image
bgra[:, :, 2] += color[0] * image
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
np.round(self.viewxy.height() * Y / X),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def align_x(self):
print("Align X")
self.align_all("x")
def align_y(self):
print("Align Y")
self.align_all("y")
def align_zz(self):
print("Align Z")
self.align_all("zz")
def align_zy(self):
print("Align Z")
self.align_all("zy")
def translate_x(self):
print("Translate X")
self.translate("x")
def translate_y(self):
print("Translate Y")
self.translate("y")
def translate_z(self):
print("Translate Z")
self.translate("z")
def translate(self, translateaxis):
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
if translateaxis == "x":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
elif translateaxis == "y":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=1) for _ in image]
elif translateaxis == "z":
image = [np.sum(_, axis=1) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
for element in signalimg:
plt.plot(element)
n_groups = self.group_index[0].shape[0]
print("Translating..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.translate_group(signalimg, i, translateaxis)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
self.centerofmass_all()
self.updateLayout()
self.status_bar.showMessage("Done!")
def translate_group(self, signalimg, group, translateaxis):
n_channels = len(self.locs)
all_xcorr = np.zeros((1, n_channels))
all_da = np.zeros((1, n_channels))
if translateaxis == "x":
proplane = "xy"
elif translateaxis == "y":
proplane = "xy"
elif translateaxis == "z":
proplane = "xz"
plotmode = 0
for j in range(n_channels):
if plotmode:
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
plt.plot(signalimg[j])
ax2 = fig.add_subplot(1, 3, 2)
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
plane = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) #
if translateaxis == "x":
projection = np.sum(plane, axis=0)
elif translateaxis == "y":
projection = np.sum(plane, axis=1)
elif translateaxis == "z":
projection = np.sum(plane, axis=1)
if plotmode:
plt.plot(projection)
# print('Step X')
# ax3 = fig.add_subplot(1,3,3)
# plt.imshow(plane, interpolation='nearest', cmap=plt.cm.ocean)
corrval = np.max(signal.correlate(signalimg[j], projection))
shiftval = (
np.argmax(signal.correlate(signalimg[j], projection))
- len(signalimg[j])
+ 1
)
all_xcorr[0, j] = corrval
all_da[0, j] = shiftval / self.oversampling
if plotmode:
plt.show()
# value with biggest cc value form table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
dafinal = np.mean(all_da[maximumcc, :])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
if translateaxis == "x":
self.locs[j].x[index] += dafinal
elif translateaxis == "y":
self.locs[j].y[index] += dafinal
elif translateaxis == "z":
self.locs[j].z[index] += dafinal * self.pixelsize
def adjust_z(self):
z_range_str = np.asarray((self.z_range.text()).split(","))
z_range = []
for element in z_range_str:
try:
z_range.append(float(element))
except ValueError:
pass
z_min = z_range[0]
z_max = z_range[1]
self.z_min = np.max([z_min, self.z_min_load])
self.z_max = np.min([z_max, self.z_max_load])
print("Z min {}, Z max {}".format(self.z_min, self.z_max))
self.updateLayout()
def adjust_xy(self):
x_range_str = np.asarray((self.x_range.text()).split(","))
x_range = []
for element in x_range_str:
try:
x_range.append(float(element))
except ValueError:
pass
x_min = x_range[0]
x_max = x_range[1]
self.x_min = np.max([x_min, self.t_min])
self.x_max = np.min([x_max, self.t_max])
print("X min {}, X max {}".format(self.x_min, self.x_max))
y_range_str = np.asarray((self.y_range.text()).split(","))
y_range = []
for element in y_range_str:
try:
y_range.append(float(element))
except ValueError:
pass
y_min = y_range[0]
y_max = y_range[1]
self.y_min = np.max([y_min, self.t_min])
self.y_max = np.min([y_max, self.t_max])
print("Y min {}, Y max {}".format(self.y_min, self.y_max))
self.updateLayout()
def rotatexy_convolution_group(
self, CF_image_avg, angles, group, rotaxis, proplane
):
n_channels = len(self.locs)
n_angles = len(angles)
all_xcorr = np.zeros((n_angles, n_channels))
all_da = np.zeros((n_angles, n_channels))
all_db = np.zeros((n_angles, n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
# rotate locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image, interpolation="nearest", cmap=plt.cm.ocean
)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = np.sum(np.multiply(CF_image_avg[j], image))
all_xcorr[k, j] = xcorr
# value with biggest cc value form table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
def rotatexy_convolution(self):
# TODO: re-write this with kwargs at some point
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2 * np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(
-degree / 360 * 2 * np.pi, degree / 360 * 2 * np.pi, a_step
)
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = "yz"
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = "xz"
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
# Change CFiamge for symmetry
if self.radio_sym.isChecked():
print("Using symmetry.")
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for i in range(symmetry - 1):
image[0] += scipy.ndimage.interpolation.rotate(
imageold,
((i + 1) * 360 / symmetry),
axes=(1, 0),
reshape=False,
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.radio_sym_custom.isChecked():
print("Using custom symmetry.")
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(","))
print(symmetry_txt)
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(
imageold, float(degree), axes=(1, 0), reshape=False
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = image
print("Convolving..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.rotatexy_convolution_group(
CF_image_avg, angles, i, rotaxis, proplane
)
self.updateLayout()
self.status_bar.showMessage("Done!")
def rotate_groups(self):
# Read out values from radiobuttons
# TODO: maybe re-write this with kwargs
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2 * np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(
-degree / 360 * 2 * np.pi, degree / 360 * 2 * np.pi, a_step
)
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = "yz"
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = "xz"
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
if self.radio_sym.isChecked():
print("Radio sym")
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for i in range(symmetry - 1):
image[0] += scipy.ndimage.interpolation.rotate(
imageold,
((i + 1) * 360 / symmetry),
axes=(1, 0),
reshape=False,
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
# TODO: Sort these functions out,
# combine with radio_sym / also for convolving.
if self.radio_sym_custom.isChecked():
print("Using custom symmetry.")
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(","))
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(
imageold, float(degree), axes=(1, 0), reshape=False
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = [np.conj(np.fft.fft2(_)) for _ in image]
# n_pixel, _ = image_avg.shape
# image_half = n_pixel / 2
print("Rotating..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.align_group(CF_image_avg, angles, i, rotaxis, proplane)
self.updateLayout()
self.status_bar.showMessage("Done!")
def getUIstate(self):
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
elif self.yz_projbtn.isChecked():
proplane = "yz"
elif self.xz_projbtn.isChecked():
proplane = "xz"
return rotaxis, proplane
def projectPlanes(self, images, proplane):
if proplane == "xy":
image = [np.sum(_, axis=2) for _ in images]
elif proplane == "yz":
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif proplane == "xz":
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
return image
def generate_template(self):
model_x_str = np.asarray((self.model_x.text()).split(","))
model_y_str = np.asarray((self.model_y.text()).split(","))
model_z_str = np.asarray((self.model_z.text()).split(","))
model_x = []
model_y = []
model_z = []
for element in model_x_str:
try:
model_x.append(float(element))
except ValueError:
pass
for element in model_y_str:
try:
model_y.append(float(element))
except ValueError:
pass
for element in model_z_str:
try:
model_z.append(float(element))
except ValueError:
pass
pixelsize = self.pixelsizeEdit.value()
blur = self.modelblurEdit.value()
# Center of mass
model_x = np.array(model_x) / pixelsize
model_y = np.array(model_y) / pixelsize
model_z = np.array(model_z)
model_x = model_x - np.mean(model_x)
model_y = model_y - np.mean(model_y)
model_z = model_z - np.mean(model_z)
rotaxis, proplane = self.getUIstate()
template_img = self.render_planes(
model_x, model_y, model_z, proplane, pixelsize
)
self.template_img = scipy.ndimage.filters.gaussian_filter(
template_img, blur
)
def model_preview(self):
self.generate_template()
# Generate a template image
fig = plt.figure()
plt.title("Preview of Template")
plt.imshow(self.template_img, interpolation="nearest", cmap=plt.cm.hot)
plt.show()
def calculate_score(self):
# Dummy button -> Functionality of rotatebtn for now
# TODO: maybe re-write this with kwargs
self.scores = []
rotaxis, proplane = self.getUIstate()
n_groups = self.group_index[0].shape[0]
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
n_locs = sum([_[0] for _ in renderings])
# Make an average and not a sum image here..
images = np.array([_[1] / n_groups for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
image = self.projectPlanes(images, proplane)
n_channels = len(image)
print("Calculating score..")
for i in tqdm(range(n_groups)):
channel_score = []
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][i].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
groupimage = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)
score = np.sum(np.sqrt(groupimage * image[j])) / np.sum(
np.sqrt(groupimage * groupimage)
)
channel_score.append(score)
self.scores.append(channel_score)
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.status_bar.showMessage(
"Done. Average score: {}".format(np.mean(self.scores))
)
plt.hist(np.array(self.scores), 40)
plt.title(
"Histogram of Scores, Mean: {:.2f}".format(np.mean(self.scores))
)
plt.xlabel("Score")
plt.ylabel("Counts")
plt.show()
def mean_angle(self, deg):
return phase(sum(rect(1, d) for d in deg) / len(deg))
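# Added note: this is the circular (vector) mean -- each angle is mapped to a
# unit phasor before averaging, so values that wrap around 2*pi average
# sensibly (e.g. angles just below 2*pi and just above 0 give a mean near 0
# rather than near pi).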
def render_planes(self, xdata, ydata, zdata, proplane, pixelsize):
# assign correct renderings for all planes
a_render = []
b_render = []
if proplane == "xy":
a_render = xdata
b_render = ydata
aval_min = self.t_min
aval_max = self.t_max
bval_min = self.t_min
bval_max = self.t_max
elif proplane == "yz":
a_render = ydata
b_render = np.divide(zdata, pixelsize)
aval_min = self.t_min
aval_max = self.t_max
bval_min = np.divide(self.z_min, pixelsize)
bval_max = np.divide(self.z_max, pixelsize)
elif proplane == "xz":
b_render = np.divide(zdata, pixelsize)
a_render = xdata
bval_min = np.divide(self.z_min, pixelsize)
bval_max = np.divide(self.z_max, pixelsize)
aval_min = self.t_min
aval_max = self.t_max
N, plane = render_histxyz(
a_render,
b_render,
self.oversampling,
aval_min,
aval_max,
bval_min,
bval_max,
)
return plane
def align_all(self, alignaxis):
a_step = np.arcsin(1 / (self.oversampling * self.r))
angles = np.arange(0, 2 * np.pi, a_step)
n_channels = len(self.locs)
n_angles = len(angles)
all_corr = np.zeros((n_angles, n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
alignimage = []
x_rot = self.locs[j].x
y_rot = self.locs[j].y
z_rot = self.locs[j].z
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
alignimage = []
for k in range(n_angles):
angle = angles[k]
if alignaxis == "zz":
proplane = "yz"
rotaxis = "x"
elif alignaxis == "zy":
proplane = "yz"
rotaxis = "x"
elif alignaxis == "y":
proplane = "xy"
rotaxis = "z"
elif alignaxis == "x":
proplane = "xy"
rotaxis = "z"
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) # RENDER PLANES WAS BUGGY AT SOME POINT
if alignimage == []:
alignimage = np.zeros(image.shape)
# CREATE ALIGNIMAGE
if alignaxis == "zz":
alignimage[np.int(alignimage.shape[0] / 2), :] += 2
alignimage[
np.int(alignimage.shape[0] / 2) + 1, :
] += 1
alignimage[
np.int(alignimage.shape[0] / 2) - 1, :
] += 1
elif alignaxis == "zy":
alignimage[:, np.int(alignimage.shape[0] / 2)] += 2
alignimage[
:, np.int(alignimage.shape[0] / 2) + 1
] += 1
alignimage[
:, np.int(alignimage.shape[0] / 2) - 1
] += 1
elif alignaxis == "y":
alignimage[:, np.int(alignimage.shape[1] / 2)] += 2
alignimage[
:, np.int(alignimage.shape[1] / 2) - 1
] += 1
alignimage[
:, np.int(alignimage.shape[1] / 2) + 1
] += 1
elif alignaxis == "x":
alignimage[np.int(alignimage.shape[0] / 2), :] += 2
alignimage[
np.int(alignimage.shape[0] / 2) + 1, :
] += 1
alignimage[
np.int(alignimage.shape[0] / 2) - 1, :
] += 1
all_corr[k, j] = np.sum(np.multiply(alignimage, image))
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image, interpolation="nearest", cmap=plt.cm.ocean
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(
alignimage,
interpolation="nearest",
cmap=plt.cm.ocean,
)
plt.colorbar()
plt.show()
# value with biggest cc value form table
maximumcc = np.argmax(np.sum(all_corr, axis=1))
rotfinal = angles[maximumcc]
for j in range(n_channels):
x_rot = self.locs[j].x
y_rot = self.locs[j].y
z_rot = self.locs[j].z
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
self.locs[j].x = x_rot
self.locs[j].y = y_rot
self.locs[j].z = z_rot
self.updateLayout()
self.status_bar.showMessage(
"Align on Axis {} complete.".format(alignaxis)
)
def align_group(self, CF_image_avg, angles, group, rotaxis, proplane):
n_channels = len(self.locs)
n_angles = len(angles)
all_xcorr = np.zeros((n_angles, n_channels))
all_da = np.zeros((n_angles, n_channels))
all_db = np.zeros((n_angles, n_channels))
flips = 1
if self.flipbtn.isChecked():
print("Considering flipped structures...")
flips = 2
for f in range(flips):
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
if f == 1: # Flipped round
if proplane == "xy":
x_original = -x_original
elif proplane == "yz":
y_original = -y_original
elif proplane == "xz":
z_original = -z_original
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) # RENDER PLANES WAS BUGGY AT SOME POINT
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image,
interpolation="nearest",
cmap=plt.cm.ocean,
)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = compute_xcorr(CF_image_avg[j], image)
n_pixelb, n_pixela = image.shape
image_halfa = n_pixela / 2 # TODO: CHECK THOSE VALUES
image_halfb = n_pixelb / 2
# find the brightest pixel
b_max, a_max = np.unravel_index(
xcorr.argmax(), xcorr.shape
)
# store the transformation if the correlation
# is larger than before
all_xcorr[k, j] = xcorr[b_max, a_max]
all_db[k, j] = np.ceil(b_max - image_halfb)
import json
import math
import numbers
import random
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Iterable
from functools import partial
from typing import List
import numpy as np
import torch
from . import functional as F
class Sequence:
def __init__(self, transforms):
if not isinstance(transforms, List):
transforms = [transforms]
self.transforms = transforms
def __call__(self, data):
for t in self.transforms:
data = t(data)
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
def to_dict(self):
return OrderedDict([("sequence", OrderedDict([t.to_dict().popitem() for t in self.transforms]))])
def to_json(self, indent=4):
return json.dumps(self.to_dict(), indent=indent)
class Shuffle:
def __init__(self, transforms):
if not isinstance(transforms, List):
transforms = [transforms]
self.transforms = transforms
def __call__(self, data):
random.shuffle(self.transforms)
for t in self.transforms:
data = t(data)
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
def to_dict(self):
return OrderedDict([("shuffle", OrderedDict([t.to_dict().popitem() for t in self.transforms]))])
def to_json(self, indent=4):
return json.dumps(self.to_dict(), indent=indent)
class Sample:
def __init__(self, transforms, k=1):
if not isinstance(transforms, List):
transforms = [transforms]
assert len(transforms) >= k
self.transforms = transforms
self.k = k
def __call__(self, data):
transforms = random.sample(self.transforms, k=self.k)
for t in transforms:
data = t(data)
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
def to_dict(self):
return OrderedDict([("sample", OrderedDict([t.to_dict().popitem() for t in self.transforms]))])
def to_json(self, indent=4):
return json.dumps(self.to_dict(), indent=indent)
class Transform(metaclass=ABCMeta):
@abstractmethod
def apply_image(self, img):
pass
def apply_mask(self, mask):
raise NotImplementedError
def apply_box(self, box):
raise NotImplementedError
def apply_kp(self, kp):
raise NotImplementedError
def __repr__(self):
format_string = self.__class__.__name__
return format_string
@abstractmethod
def to_dict(self):
pass
class ToTensor(Transform):
def __call__(self, data):
if isinstance(data, dict):
for k, v in data.items():
if v is not None:
data[k] = getattr(self, f'apply_{k}')(v)
else:
data = self.apply_image(data)
return data
def apply_image(self, img):
return F.to_tensor(img)
def apply_mask(self, mask):
if isinstance(mask, List):
return torch.stack([torch.as_tensor(np.array(m)) for m in mask])
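# Added usage sketch (not part of the original module): composing the helpers
# above in the torchvision style. It assumes the parts of ToTensor cut off by
# this excerpt (e.g. its to_dict()) are implemented in the full file.
#
#   pipeline = Sequence([ToTensor()])
#   tensor = pipeline(np.zeros((32, 32, 3), dtype=np.uint8))  # -> apply_image -> F.to_tensor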
import numpy as np
import scipy.constants as const
import json
import os
from matplotlib import pyplot as plt
import ckvpy.tools.photon_yield as photon_yield
import ckvpy.tools.effective as effective
from ckvpy.tools.analysis import dataAnalysis
class dataAnalysis3D(dataAnalysis):
"""Data analysis class for 3D models, requires intersection of electron
plane and dispersion to find cherenkov angle etc."""
def __init__(self, data):
self.data_full = data # includes full Brillouin zone data
self.data_dict = {} # same structure as in 2D case
self._init_data_dict(data)
self._get_num_bands()
self.status = {
'reflected': True,
'interpolated': True,
'intersected': False
}
def _init_data_dict(self, data):
self.data_dict = {}
for root in self.data_full:
self.data_dict[root] = {}
for band in self.data_full[root]:
self.data_dict[root][band] = {}
def calculateCherenkov(self, beta=0.999, direction = [1,0]):
"""Find intersection of electron plane and dispersion to get
Cherenkov behaviour
Args:
beta (float): electron speed ratio with c
direction (list): determines direction of electron with idices
rho (|x,y|) and z which defines e-plane omega = k.v
"""
if type(direction[0]) is not int or type(direction[1]) is not int:
raise ValueError("Only directions purely in z or rho supported")
for band in self.data_full['default']:
m_rho = self.data_full['default'][band]['mi'] # matrix of k_rho values
mz = np.copy(self.data_full['default'][band]['mz']) # mutated so copy
my = self.data_full['default'][band]['my'] # matrix of ky values
mx = self.data_full['default'][band]['mx'] # matrix of kx values
mf = np.copy(self.data_full['default'][band]['mf']) # mutated so copy
z_array = mz.T[0][-1:1:-1] # starts at maximum
rho_array = m_rho[0][1:-1] # cut off edges (interp)
# e_plane = self.data_dict['default'][band]['mj']*3.e8*v
mf *= 2*np.pi # omega=2pif
mf = mf.T # since we transpose z to get z array from columns
self.data_dict['default'][band] = \
{'kz': [None], 'k_rho': [None], 'frequency': [None], 'direction': direction}
kz_c = np.array([]) # empty temp arrays to store crossing points
k_rho_c = np.array([])
f_c = np.array([])
for kz_i, kz in enumerate(z_array[:-1]): # ith value of kz
for k_rho_i, k_rho in enumerate(rho_array[:-1]): # jth k_rho
kz2 = z_array[kz_i + 1] # i+1th value of kz
k_rho2 = rho_array[k_rho_i + 1] # j+1th k_rho
f = mf[kz_i, k_rho_i] # f(kz,k_rho)
fz2 = mf[kz_i + 1, k_rho_i] # f(kz2,k_rho)
f_rho2 = mf[kz_i, k_rho_i + 1] # f(kz,k_rho2)
# get crossing points and booleans (was crossing found?)
rho_found, rho_cross, z_found, z_cross = \
self._cross(beta, kz, kz2, k_rho, k_rho2, f, fz2,
f_rho2, direction)
k_rho_cross, f_rho_cross = rho_cross
kz_cross, fz_cross = z_cross
if z_found: # crossing found in kz direction
kz_c = np.append(kz_c, kz_cross)
k_rho_c = np.append(k_rho_c, k_rho)
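# Illustrative sketch (not part of the original class): the essence of the _cross step above
# is finding where the sampled dispersion crosses the electron line omega = v*k. A 1-D
# version with a made-up dispersion branch:
demo_k = np.linspace(0.1, 2.0, 200)
demo_w = np.sqrt(demo_k)              # made-up dispersion (arbitrary units)
demo_v = 0.9                          # slope of the electron line omega = v*k
demo_d = demo_w - demo_v * demo_k     # a sign change of this marks a crossing
for i in np.where(np.sign(demo_d[:-1]) != np.sign(demo_d[1:]))[0]:
    # linear interpolation between the bracketing samples
    k_c = demo_k[i] - demo_d[i] * (demo_k[i + 1] - demo_k[i]) / (demo_d[i + 1] - demo_d[i])
    print(k_c, demo_v * k_c)          # crossing wavevector and its frequency (~1.235, ~1.111)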
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests and integration tests for the ``default.qubit.tf`` device.
"""
from itertools import product
import numpy as np
import pytest
tf = pytest.importorskip("tensorflow", minversion="2.0")
import pennylane as qml
from pennylane.wires import Wires
from pennylane.devices.default_qubit_tf import DefaultQubitTF
from gate_data import (
I,
X,
Y,
Z,
H,
S,
T,
CNOT,
CZ,
SWAP,
Toffoli,
CSWAP,
Rphi,
Rotx,
Roty,
Rotz,
Rot3,
CRotx,
CRoty,
CRotz,
CRot3,
MultiRZ1,
MultiRZ2,
)
np.random.seed(42)
#####################################################
# Test matrices
#####################################################
U = np.array(
[
[0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],
[-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],
]
)
U2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / np.sqrt(3)
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
#####################################################
# Define standard qubit operations
#####################################################
single_qubit = [(qml.S, S), (qml.T, T), (qml.PauliX, X), (qml.PauliY, Y), (qml.PauliZ, Z), (qml.Hadamard, H)]
single_qubit_param = [(qml.PhaseShift, Rphi), (qml.RX, Rotx), (qml.RY, Roty), (qml.RZ, Rotz), (qml.MultiRZ, MultiRZ1)]
two_qubit = [(qml.CZ, CZ), (qml.CNOT, CNOT), (qml.SWAP, SWAP)]
two_qubit_param = [(qml.CRX, CRotx), (qml.CRY, CRoty), (qml.CRZ, CRotz), (qml.MultiRZ, MultiRZ2)]
three_qubit = [(qml.Toffoli, Toffoli), (qml.CSWAP, CSWAP)]
#####################################################
# Fixtures
#####################################################
@pytest.fixture(scope="session")
def init_state():
"""Generates a random initial state"""
def _init_state(n):
"""random initial state"""
state = np.random.random([2 ** n]) + np.random.random([2 ** n]) * 1j
state /= np.linalg.norm(state)
return state
return _init_state
#####################################################
# Device-level integration tests
#####################################################
class TestApply:
"""Test application of PennyLane operations."""
def test_basis_state(self, tol):
"""Test basis state initialization"""
dev = DefaultQubitTF(wires=4)
state = np.array([0, 0, 1, 0])
dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])
res = dev.state
expected = np.zeros([2 ** 4])
expected[np.ravel_multi_index(state, [2] * 4)] = 1
assert isinstance(res, tf.Tensor)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_invalid_basis_state_length(self, tol):
"""Test that an exception is raised if the basis state is the wrong size"""
dev = DefaultQubitTF(wires=4)
state = np.array([0, 0, 1, 0])
with pytest.raises(
ValueError, match=r"BasisState parameter and wires must be of equal length"
):
dev.apply([qml.BasisState(state, wires=[0, 1, 2])])
def test_invalid_basis_state(self, tol):
"""Test that an exception is raised if the basis state is invalid"""
dev = DefaultQubitTF(wires=4)
state = np.array([0, 0, 1, 2])
with pytest.raises(
ValueError, match=r"BasisState parameter must consist of 0 or 1 integers"
):
dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])
def test_qubit_state_vector(self, init_state, tol):
"""Test qubit state vector application"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
dev.apply([qml.QubitStateVector(state, wires=[0])])
res = dev.state
expected = state
assert isinstance(res, tf.Tensor)
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_full_subsystem_statevector(self, mocker):
"""Test applying a state vector to the full subsystem"""
dev = DefaultQubitTF(wires=['a', 'b', 'c'])
state = tf.constant([1, 0, 0, 0, 1, 0, 1, 1], dtype=tf.complex128) / 2.
state_wires = qml.wires.Wires(['a', 'b', 'c'])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
assert np.all(tf.reshape(dev._state, [-1]) == state)
spy.assert_not_called()
def test_partial_subsystem_statevector(self, mocker):
"""Test applying a state vector to a subset of wires of the full subsystem"""
dev = DefaultQubitTF(wires=['a', 'b', 'c'])
state = tf.constant([1, 0, 1, 0], dtype=tf.complex128) / np.sqrt(2.)
state_wires = qml.wires.Wires(['a', 'c'])
spy = mocker.spy(dev, "_scatter")
dev._apply_state_vector(state=state, device_wires=state_wires)
res = tf.reshape(tf.reduce_sum(dev._state, axis=(1,)), [-1])
assert np.all(res == state)
spy.assert_called()
def test_invalid_qubit_state_vector_size(self):
"""Test that an exception is raised if the state
vector is the wrong size"""
dev = DefaultQubitTF(wires=2)
state = np.array([0, 1])
with pytest.raises(ValueError, match=r"State vector must be of length 2\*\*wires"):
dev.apply([qml.QubitStateVector(state, wires=[0, 1])])
def test_invalid_qubit_state_vector_norm(self):
"""Test that an exception is raised if the state
vector is not normalized"""
dev = DefaultQubitTF(wires=2)
state = np.array([0, 12])
with pytest.raises(ValueError, match=r"Sum of amplitudes-squared does not equal one"):
dev.apply([qml.QubitStateVector(state, wires=[0])])
def test_invalid_state_prep(self):
"""Test that an exception is raised if a state preparation is not the
first operation in the circuit."""
dev = DefaultQubitTF(wires=2)
state = np.array([0, 12])
with pytest.raises(
qml.DeviceError,
match=r"cannot be used after other Operations have already been applied",
):
dev.apply([qml.PauliZ(0), qml.QubitStateVector(state, wires=[0])])
@pytest.mark.parametrize("op,mat", single_qubit)
def test_single_qubit_no_parameters(self, init_state, op, mat, tol):
"""Test non-parametrized single qubit operations"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [op(wires=0)]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert isinstance(res, tf.Tensor)
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", single_qubit_param)
def test_single_qubit_parameters(self, init_state, op, func, theta, tol):
"""Test parametrized single qubit operations"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [op(theta, wires=0)]
dev.apply(queue)
res = dev.state
expected = func(theta) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_rotation(self, init_state, tol):
"""Test three axis rotation gate"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [qml.Rot(a, b, c, wires=0)]
dev.apply(queue)
res = dev.state
expected = Rot3(a, b, c) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_controlled_rotation(self, init_state, tol):
"""Test three axis controlled-rotation gate"""
dev = DefaultQubitTF(wires=2)
state = init_state(2)
a = 0.542
b = 1.3432
c = -0.654
queue = [qml.QubitStateVector(state, wires=[0, 1])]
queue += [qml.CRot(a, b, c, wires=[0, 1])]
dev.apply(queue)
res = dev.state
expected = CRot3(a, b, c) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_inverse_operation(self, init_state, tol):
"""Test that the inverse of an operation is correctly applied"""
"""Test three axis rotation gate"""
dev = DefaultQubitTF(wires=1)
state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
queue = [qml.QubitStateVector(state, wires=[0])]
queue += [qml.Rot(a, b, c, wires=0).inv()]
dev.apply(queue)
res = dev.state
expected = np.linalg.inv(Rot3(a, b, c)) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("op,mat", two_qubit)
def test_two_qubit_no_parameters(self, init_state, op, mat, tol):
"""Test non-parametrized two qubit operations"""
dev = DefaultQubitTF(wires=2)
state = init_state(2)
queue = [qml.QubitStateVector(state, wires=[0, 1])]
queue += [op(wires=[0, 1])]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("mat", [U, U2])
def test_qubit_unitary(self, init_state, mat, tol):
"""Test application of arbitrary qubit unitaries"""
N = int(np.log2(len(mat)))
dev = DefaultQubitTF(wires=N)
state = init_state(N)
queue = [qml.QubitStateVector(state, wires=range(N))]
queue += [qml.QubitUnitary(mat, wires=range(N))]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("op, mat", three_qubit)
def test_three_qubit_no_parameters(self, init_state, op, mat, tol):
"""Test non-parametrized three qubit operations"""
dev = DefaultQubitTF(wires=3)
state = init_state(3)
queue = [qml.QubitStateVector(state, wires=[0, 1, 2])]
queue += [op(wires=[0, 1, 2])]
dev.apply(queue)
res = dev.state
expected = mat @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("theta", [0.5432, -0.232])
@pytest.mark.parametrize("op,func", two_qubit_param)
def test_two_qubit_parameters(self, init_state, op, func, theta, tol):
"""Test two qubit parametrized operations"""
dev = DefaultQubitTF(wires=2)
state = init_state(2)
queue = [qml.QubitStateVector(state, wires=[0, 1])]
queue += [op(theta, wires=[0, 1])]
dev.apply(queue)
res = dev.state
expected = func(theta) @ state
assert np.allclose(res, expected, atol=tol, rtol=0)
def test_apply_ops_not_supported(self, mocker, monkeypatch):
"""Test that when a version of TensorFlow before 2.3.0 is used, the _apply_ops dictionary is
empty and application of a CNOT gate is performed using _apply_unitary_einsum"""
with monkeypatch.context() as m:
m.setattr("pennylane.devices.default_qubit_tf.SUPPORTS_APPLY_OPS", False)
dev = DefaultQubitTF(wires=3)
assert dev._apply_ops == {}
spy = mocker.spy(DefaultQubitTF, "_apply_unitary_einsum")
queue = [qml.CNOT(wires=[1, 2])]
dev.apply(queue)
spy.assert_called_once()
def test_apply_ops_above_8_wires(self, mocker):
"""Test that when 9 wires are used, the _apply_ops dictionary is empty and application of a
CNOT gate is performed using _apply_unitary_einsum"""
dev = DefaultQubitTF(wires=9)
assert dev._apply_ops == {}
spy = mocker.spy(DefaultQubitTF, "_apply_unitary_einsum")
queue = [qml.CNOT(wires=[1, 2])]
dev.apply(queue)
spy.assert_called_once()
@pytest.mark.xfail(
raises=tf.errors.UnimplementedError,
reason="Slicing is not supported for more than 8 wires",
strict=True,
)
def test_apply_ops_above_8_wires_using_special(self):
"""Test that special apply methods that involve slicing function correctly when using 9
wires"""
dev = DefaultQubitTF(wires=9)
dev._apply_ops = {"CNOT": dev._apply_cnot}
queue = [qml.CNOT(wires=[1, 2])]
dev.apply(queue)
THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
"""Plotting for manuscript figures."""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import fit
from models import calc_cs, calc_ffs, calc_ge_gm, calc_rho, dipole_ffs, get_b2, hbarc
matplotlib.rcParams["text.usetex"] = True
matplotlib.rcParams["font.size"] = 13
matplotlib.rcParams["font.family"] = "lmodern"
matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{lmodern}"
matplotlib.rcParams["xtick.labelsize"] = 12
matplotlib.rcParams["ytick.labelsize"] = 12
# Number of samples to use when generating statistical uncertainty bands
N_SAMPLES = 1000
def read_Rosenbluth_data():
"""Read data for G_E and G_M from "Rosenbluth.dat"."""
col_names = ["Q2", "GE", "delta_GE", "GM", "delta_GM"]
data = pd.read_csv("data/Rosenbluth.dat", sep=" ", skiprows=5, names=col_names)
return data
def calc_interval(calc_func, x_range, param_list, order):
"""Calculate 68% ("1 sigma") percentile interval from param sample."""
out = np.array([calc_func(x_range, param, order) for param in param_list])
return np.percentile(out, (15.9, 84.1), 0)
def calc_params(data, order, reg_param):
"""Run fit and get model parameters and covariance."""
params, _, _, _, cov = fit.fit(data, data, order, reg_param)
params = params[fit.N_NORM_PARAMS :]
cov = cov[fit.N_NORM_PARAMS :, fit.N_NORM_PARAMS :]
return params, cov
def calc_sys_bands(calc_func, x_range, data, order, reg_param):
"""Calculate systematic error bands for given quantity."""
params, _ = calc_params(data, order, reg_param)
f1, f2 = calc_func(x_range, params, order)
mincut_params = fit.fit_systematic_variant("cs_mincut", data, order, reg_param)[0]
maxcut_params = fit.fit_systematic_variant("cs_maxcut", data, order, reg_param)[0]
sysup_params = fit.fit_systematic_variant("cs_sysup", data, order, reg_param)[0]
syslow_params = fit.fit_systematic_variant("cs_syslow", data, order, reg_param)[0]
mincut_f1, mincut_f2 = calc_func(x_range, mincut_params, order)
maxcut_f1, maxcut_f2 = calc_func(x_range, maxcut_params, order)
sysup_f1, sysup_f2 = calc_func(x_range, sysup_params, order)
syslow_f1, syslow_f2 = calc_func(x_range, syslow_params, order)
# Calculate upper and lower limits for each of the systematic variations:
f1_cut_up = np.clip(np.max(np.stack([mincut_f1 - f1, maxcut_f1 - f1]), 0), 0, None)
f1_cut_low = np.clip(np.min(np.stack([mincut_f1 - f1, maxcut_f1 - f1]), 0), None, 0)
f1_sys_up = np.clip(np.max(np.stack([sysup_f1 - f1, syslow_f1 - f1]), 0), 0, None)
f1_sys_low = np.clip(np.min(np.stack([sysup_f1 - f1, syslow_f1 - f1]), 0), None, 0)
f2_cut_up = np.clip(np.max(np.stack([mincut_f2 - f2, maxcut_f2 - f2]), 0), 0, None)
f2_cut_low = np.clip(np.min(np.stack([mincut_f2 - f2, maxcut_f2 - f2]), 0), None, 0)
f2_sys_up = np.clip(np.max(np.stack([sysup_f2 - f2, syslow_f2 - f2]), 0), 0, None)
f2_sys_low = np.clip(np.min(np.stack([sysup_f2 - f2, syslow_f2 - f2]), 0), None, 0)
# Add two systematic "errors" in quadrature:
f1_up = np.sqrt(f1_cut_up ** 2 + f1_sys_up ** 2)
f1_low = np.sqrt(f1_cut_low ** 2 + f1_sys_low ** 2)
f2_up = np.sqrt(f2_cut_up ** 2 + f2_sys_up ** 2)
f2_low = np.sqrt(f2_cut_low ** 2 + f2_sys_low ** 2)
return f1_up, f1_low, f2_up, f2_low
def fill_between(x_range, y_up, y_low, color, hbarc_scale=False):
"""Plot confidence interval."""
if hbarc_scale:
x_range = hbarc * x_range
y_up = y_up / (hbarc * hbarc)
y_low = y_low / (hbarc * hbarc)
plt.fill_between(x_range, y_up, y_low, color=color, lw=0, alpha=0.7)
def plot_f1_f2(data, order, reg_param):
"""Plot the Dirac and Pauli form factors."""
params, cov = calc_params(data, order, reg_param)
Q2_range = np.linspace(0, 1, 100)
F1, F2 = calc_ffs(Q2_range, params, order)
# Transverse charge radius and the slope of F1:
b2, _ = get_b2(params, cov)
slope_x = np.linspace(0, 0.15, 10)
slope_y = 1 - slope_x * b2 / 4
# Plot the form factor slope:
plt.plot(slope_x, slope_y, ls="--", color="black", lw=1)
if fit.covariance_bad(cov):
print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
draw_confidence = False
else:
draw_confidence = True
if draw_confidence:
# Calculate statistical uncertainties:
params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
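# Illustrative sketch (not part of the original module): how the percentile band in
# calc_interval is built from parameter samples. demo_params/demo_cov are made-up numbers
# for a toy straight-line model y = p0 + p1*x.
demo_params = np.array([1.0, 0.5])
demo_cov = np.array([[0.02, 0.005], [0.005, 0.01]])
demo_samples = np.random.multivariate_normal(demo_params, demo_cov, size=N_SAMPLES)
demo_x = np.linspace(0.0, 1.0, 50)
demo_curves = np.array([p0 + p1 * demo_x for p0, p1 in demo_samples])
demo_low, demo_up = np.percentile(demo_curves, (15.9, 84.1), 0)  # central 68% band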
import numpy as np
from scipy import stats, special
import matplotlib.pyplot as plt
from math import sqrt, factorial
exercise = 15
#################################################################
# exercise 2.2
if exercise == 2:
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 150, sqrt(150)), color="black")
plt.xlabel(r"$\hat{\nu}$")
plt.ylabel(r"f($\hat{\nu}$)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.show()
#################################################################
# exercise 2.3
if exercise == 3:
q = 150 # estimator from the first experiment, we use it as "true" value for the following MC's
# we now generate 10^6 MC experiments from a Poisson pdf. There is
# a module, numpy.random, which implements pseudo-random number generators for various distributions.
# since we want to generate numbers Poisson distributed, we can directly use this module.
poisson_data = np.random.poisson(q, 10**6)
print(poisson_data)
print(len(poisson_data))
# the obtained values are directly N_i.
# Now we compute the estimator for each one, which in fact corresponds to q_i = N_i
# so we don't have to make any change
plt.hist(poisson_data, bins= 30, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 150, sqrt(150)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (150,150)$")
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(poisson_data)
variance_estimator = np.var(poisson_data, ddof=1)
print(mean_estimator, variance_estimator)
plt.xlabel(r"$\hat{\nu}$")
plt.ylabel(r"N($\hat{\nu}$)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
#################################################################
# exercise 2.4
if exercise == 4:
N = 150
nu = np.linspace(100,200,100000)
# posterior_pdf = ((nu**N)*np.exp(-nu))/special.gamma(N+1)
# be careful here. This last expression overflows when n=113 since the result reaches ~exp(308)
# Remember that the largest normal 64-bit floating point number is about 1.7x10^308
# so we have to redefine the calculations
posterior_pdf = ((nu**(N/2))*(np.exp(-nu))*(nu**(N/2)))/special.gamma(N+1)
plt.plot(nu, posterior_pdf, color = "black", linewidth= "1", label = r"P($\nu$|150)")
plt.plot(nu, stats.norm.pdf(nu, 150, sqrt(150)), color="red", linewidth= "1",
label=r"$\mathcal{N} \ (150,150)$")
print(np.argmax(posterior_pdf))
print(nu[np.argmax(posterior_pdf)])
plt.xlabel(r"$\nu$")
plt.ylabel(r"P($\nu$|150)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
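# Analytic cross-check (illustrative, not part of the original exercise): with a flat prior,
# P(nu|N) ~ nu^N exp(-nu)/N! is a Gamma(N+1, 1) distribution, so its mode is N while its mean
# and variance are both N+1. For N = 150 the posterior therefore peaks at 150 even though its
# mean and variance are 151.
if exercise == -4:  # hypothetical flag so this check never interferes with the exercises
    g = stats.gamma(a=151)
    nu_grid = np.linspace(100, 200, 100001)
    print(nu_grid[np.argmax(g.pdf(nu_grid))], g.mean(), g.var())  # 150.0, 151.0, 151.0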
#################################################################
# exercise 2.5
if exercise == 5:
N_obs=150
nu = np.random.uniform(0.0, N_obs + 10.0*sqrt(N_obs), size=10**7) # we generate nu that satisfies our prior
# N = np.array([])
count = 0
nu_accepted = np.array([])
for nu_i in nu:
N = np.random.poisson(nu_i, 1) # we generate one N for each value of nu
if N == N_obs:
nu_accepted = np.append(nu_accepted, nu_i)
count += 1
print(count)
plt.figure(1)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 150, sqrt(150)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (150,150)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(nu_accepted)
variance_estimator = np.var(nu_accepted, ddof=1)
print(mean_estimator, variance_estimator)
print("{0:.2f},{1:.2f}".format(mean_estimator, variance_estimator))
# mean = 150.91 variance 150.56
# 151.06,151.09
# 150.95,152.57
# 151.04,151.19
# revise exercise 2.4: find the mode and verify whether it is 150 or 151
plt.figure(2)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
plt.plot(x_axis, stats.norm.pdf(x_axis, mean_estimator, sqrt(variance_estimator)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (151,151)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
#################################################################
# exercise 2.6 (if 6 -> repeat ex 2.4. if 7 ex -> 7 repeat ex 2.5)
if exercise == 6:
N = 10
nu = np.linspace(0,50,100000)
# posterior_pdf = ((nu**N)*np.exp(-nu))/special.gamma(N+1)
# be careful here. This last expression overflows when n=113 since the result reaches ~exp(308)
# Remember that the largest normal 64-bit floating point number is about 1.7x10^308
# so we have to redefine the calculations
posterior_pdf = ((nu**(N/2))*(np.exp(-nu))*(nu**(N/2)))/special.gamma(N+1)
plt.plot(nu, posterior_pdf, color = "black", linewidth= "1", label = r"P($\nu$|10)")
plt.plot(nu, stats.norm.pdf(nu, 10, sqrt(10)), color="red", linewidth= "1",
label=r"$\mathcal{N} \ (10,10)$")
print(np.argmax(posterior_pdf))
print(nu[np.argmax(posterior_pdf)])
plt.xlabel(r"$\nu$")
plt.ylabel(r"P($\nu$|10)")
plt.xlim(0,20)
plt.ylim(0, 0.14)
plt.xticks(np.arange(0,22,2))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
if exercise == 7:
N_obs=10
nu = np.random.uniform(0.0, N_obs + 10.0*sqrt(N_obs), size=10**7) # we generate nu that satisfies our prior
# N = np.array([])
count = 0
nu_accepted = np.array([])
for nu_i in nu:
N = np.random.poisson(nu_i, 1) # we generate one N for each value of nu
if N == N_obs:
nu_accepted = np.append(nu_accepted, nu_i)
count += 1
print(count)
plt.figure(1)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 10, sqrt(10)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (10,10)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,20)
plt.ylim(0, 0.14)
plt.xticks(np.arange(0,22,2))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(nu_accepted)
variance_estimator = np.var(nu_accepted, ddof=1)
print(mean_estimator, variance_estimator)
print("{0:.2f},{1:.2f}".format(mean_estimator, variance_estimator))
plt.figure(2)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
plt.plot(x_axis, stats.norm.pdf(x_axis, mean_estimator, sqrt(variance_estimator)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (11,11)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,20)
plt.ylim(0, 0.14)
plt.xticks(np.arange(0,22,2))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
#################################################################
# exercise 2.7 (if 8 -> repeat ex 2.4. if 9 ex -> 7 repeat ex 2.5)
if exercise == 8:
N = 1
nu = np.linspace(0,10,10000)
# posterior_pdf = ((nu**N)*np.exp(-nu))/special.gamma(N+1)
# be careful here. This last expression overflows when n=113 since the result reaches ~exp(308)
# Remember that the largest normal 64-bit floating point number is about 1.7x10^308
# so we have to redefine the calculations
posterior_pdf = ((nu**(N/2))*(np.exp(-nu))*(nu**(N/2)))/special.gamma(N+1)
plt.plot(nu, posterior_pdf, color = "black", linewidth= "1", label = r"P($\nu$|1)")
plt.plot(nu, stats.norm.pdf(nu, 1, sqrt(1)), color="red", linewidth= "1",
label=r"$\mathcal{N} \ (1,1)$")
print(np.argmax(posterior_pdf))
print(nu[np.argmax(posterior_pdf)])
plt.xlabel(r"$\nu$")
plt.ylabel(r"P($\nu$|1)")
plt.xlim(0,5)
plt.ylim(0, 0.45)
plt.xticks(np.arange(0,5.5,0.5))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
if exercise == 9:
N_obs=1
nu = np.random.uniform(0.0, N_obs + 10.0*sqrt(N_obs), size=10**7) # we generate nu that satisfies our prior
# N = np.array([])
count = 0
nu_accepted = np.array([])
for nu_i in nu:
N = np.random.poisson(nu_i, 1) # we generate one N for each value of nu
if N == N_obs:
nu_accepted = np.append(nu_accepted, nu_i)
count += 1
print(count)
plt.figure(1)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 1, sqrt(1)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (1,1)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,5)
plt.ylim(0, 0.45)
plt.xticks(np.arange(0,5.5,0.5))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(nu_accepted)
variance_estimator = np.var(nu_accepted, ddof=1)
print(mean_estimator, variance_estimator)
print("{0:.2f},{1:.2f}".format(mean_estimator, variance_estimator))
plt.figure(2)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
plt.plot(x_axis, stats.norm.pdf(x_axis, mean_estimator, sqrt(variance_estimator)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (1,1)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,5)
plt.ylim(0, 0.45)
plt.xticks(np.arange(0,5.5,0.5))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
#################################################################
#################################################################
#################################################################
# exercise 2.8
if exercise == 10:
N = 150
nu = np.linspace(100,200,100000)
posterior_pdf = ((nu**((N-1)/2))*(np.exp(-nu))*(nu**((N-1)/2)))/special.gamma(N)
plt.plot(nu, posterior_pdf, color = "black", linewidth= "1", label = r"P($\nu$|150)")
plt.plot(nu, stats.norm.pdf(nu, 150, sqrt(150)), color="red", linewidth= "1",
label=r"$\mathcal{N} \ (150,150)$")
print(np.argmax(posterior_pdf))
print(nu[np.argmax(posterior_pdf)])
plt.xlabel(r"$\nu$")
plt.ylabel(r"P($\nu$|150)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
#################################################################
if exercise == 11:
N_obs=150
# we generate nu that satisfies our prior.
# we have used the inv transformation MC method
# remember that we always use r = U[0,1] in this method
nu = np.exp(np.random.uniform(0.0, np.log(N_obs + 10.0*sqrt(N_obs)), size=10**7))
# N = np.array([])
count = 0
nu_accepted = np.array([])
for nu_i in nu:
N = np.random.poisson(nu_i, 1) # we generate one N for each value of nu
if N == N_obs:
nu_accepted = np.append(nu_accepted, nu_i)
count += 1
print(count, nu_i)
plt.figure(1)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 150, sqrt(150)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (150,150)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(nu_accepted)
variance_estimator = np.var(nu_accepted, ddof=1)
print(mean_estimator, variance_estimator)
print("{0:.2f},{1:.2f}".format(mean_estimator, variance_estimator))
plt.figure(2)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
plt.plot(x_axis, stats.norm.pdf(x_axis, mean_estimator, sqrt(variance_estimator)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (151,151)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(100,200)
plt.ylim(0, 0.035)
plt.xticks(np.arange(100,210,10))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
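# Side check (illustrative, not part of the original exercises): the inverse-transform step in
# exercise 2.8 maps r ~ U[0, log(b)] to nu = exp(r), whose density is proportional to 1/nu on
# [1, b] (the scale-invariant prior). A quick consistency test against the analytic mean:
if exercise == -11:  # hypothetical flag so this never runs alongside the exercises above
    b = 150 + 10.0 * sqrt(150)
    nu_check = np.exp(np.random.uniform(0.0, np.log(b), size=10**6))
    print(np.mean(nu_check), (b - 1) / np.log(b))  # both close to ~48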
#################################################################
# exercise 2.6 (if 6 -> repeat ex 2.4. if 7 ex -> 7 repeat ex 2.5)
if exercise == 12:
N = 10
nu = np.linspace(0,50,100000)
# posterior_pdf = ((nu**N)*np.exp(-nu))/special.gamma(N+1)
# be careful here. This last expression overflows when n=113 since the result reaches ~exp(308)
# Remember that the largest normal 64-bit floating point number is about 1.7x10^308
# so we have to redefine the calculations
posterior_pdf = ((nu**((N-1)/2))*(np.exp(-nu))*(nu**((N-1)/2)))/special.gamma(N)
plt.plot(nu, posterior_pdf, color = "black", linewidth= "1", label = r"P($\nu$|10)")
plt.plot(nu, stats.norm.pdf(nu, 10, sqrt(10)), color="red", linewidth= "1",
label=r"$\mathcal{N} \ (10,10)$")
print(np.argmax(posterior_pdf))
print(nu[np.argmax(posterior_pdf)])
plt.xlabel(r"$\nu$")
plt.ylabel(r"P($\nu$|10)")
plt.xlim(0,20)
plt.ylim(0, 0.14)
plt.xticks(np.arange(0,22,2))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
if exercise == 13:
N_obs=10
nu = np.exp(np.random.uniform(0.0, np.log(N_obs + 10.0*sqrt(N_obs)), size=10**7)) # we generate nu that satisfies our prior
# N = np.array([])
count = 0
nu_accepted = np.array([])
for nu_i in nu:
N = np.random.poisson(nu_i, 1) # we generate one N for each value of nu
if N == N_obs:
nu_accepted = np.append(nu_accepted, nu_i)
count += 1
print(count)
plt.figure(1)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 10, sqrt(10)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (10,10)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,20)
plt.ylim(0, 0.14)
plt.xticks(np.arange(0,22,2))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(nu_accepted)
variance_estimator = np.var(nu_accepted, ddof=1)
print(mean_estimator, variance_estimator)
print("{0:.2f},{1:.2f}".format(mean_estimator, variance_estimator))
plt.figure(2)
plt.hist(nu_accepted, bins= 30, normed=True, histtype="step", color="black")
plt.plot(x_axis, stats.norm.pdf(x_axis, mean_estimator, sqrt(variance_estimator)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (11,11)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,20)
plt.ylim(0, 0.14)
plt.xticks(np.arange(0,22,2))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
#################################################################
# exercise 2.7 (if 8 -> repeat ex 2.4. if 9 ex -> 7 repeat ex 2.5)
if exercise == 14:
N = 1
nu = np.linspace(0,10,10000)
# posterior_pdf = ((nu**N)*np.exp(-nu))/special.gamma(N+1)
# be careful here. This last expression overflows when n=113 since the result reaches ~exp(308)
# Remember that the largest normal 64-bit floating point number is about 1.7x10^308
# so we have to redefine the calculations
posterior_pdf = ((nu**((N-1)/2))*(np.exp(-nu))*(nu**((N-1)/2)))/special.gamma(N)
plt.plot(nu, posterior_pdf, color = "black", linewidth= "1", label = r"P($\nu$|1)")
plt.plot(nu, stats.norm.pdf(nu, 1, sqrt(1)), color="red", linewidth= "1",
label=r"$\mathcal{N} \ (1,1)$")
print(np.argmax(posterior_pdf))
print(nu[np.argmax(posterior_pdf)])
plt.xlabel(r"$\nu$")
plt.ylabel(r"P($\nu$|1)")
plt.xlim(0,5)
plt.ylim(0, 1)
plt.xticks(np.arange(0,5.5,0.5))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
if exercise == 15:
N_obs=1
nu = np.exp(np.random.uniform(-100., np.log(N_obs + 10.0*sqrt(N_obs)), size=10**7)) # we generate nu that satisfies our prior
# N = np.array([])
count = 0
nu_accepted = np.array([])
for nu_i in nu:
N = np.random.poisson(nu_i, 1) # we generate one N for each value of nu
if N == N_obs:
nu_accepted = np.append(nu_accepted, nu_i)
count += 1
print(count)
plt.figure(1)
plt.hist(nu_accepted, bins= 40, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 1, sqrt(1)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (1,1)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,5)
plt.ylim(0, 0.60)
plt.xticks(np.arange(0,5.5,0.5))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(nu_accepted)
variance_estimator = np.var(nu_accepted, ddof=1)
print(mean_estimator, variance_estimator)
print("{0:.2f},{1:.2f}".format(mean_estimator, variance_estimator))
plt.figure(2)
plt.hist(nu_accepted, bins= 50, normed=True, histtype="step", color="black")
plt.plot(x_axis, stats.norm.pdf(x_axis, mean_estimator, sqrt(variance_estimator)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (1,1)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,5)
plt.ylim(0, 1.0)
plt.xticks(np.arange(0,5.5,0.5))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
plt.show()
np.savetxt("nu_accepted", nu_accepted)
if exercise == 16:
nu_accepted=np.loadtxt("nu_accepted")
plt.figure(1)
plt.hist(nu_accepted, bins= 40, normed=True, histtype="step", color="black")
# we compare it with the result in exercise 2.2
x_axis = np.linspace(0,300, 10000)
plt.plot(x_axis, stats.norm.pdf(x_axis, 1, sqrt(1)), color="red", linewidth = "1",
label= r"$\mathcal{N} \ (1,1)$")
plt.xlabel(r"$\nu$")
plt.ylabel(r"N($\nu$)")
plt.xlim(0,5)
plt.ylim(0, 0.60)
plt.xticks(np.arange(0,5.5,0.5))
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# we compute the estimators of the mean and variance from poisson_data
mean_estimator = np.mean(nu_accepted)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>
Updated: <NAME>
Email: <EMAIL>
"""
import logging
import os
from math import log
import cv2
import numpy as np
import numpy.linalg as la
from scipy.spatial import distance as spd
import pywt
import Tyf
import tifffile as tiff
import rawpy
class Image(object):
""" Image class
Attributes:
full path
original image, may be uint16 type
fullsize gray image
exif info, Tyf.TiffFile type
image features
"""
def __init__(self, full_path):
super(Image, self).__init__()
self.full_path = full_path
self.dir, self.name = os.path.split(full_path)
self.focal_len = None
self.features = {}
self.tf = None
_, ext = os.path.splitext(full_path)
if ext.lower() in (".tiff", ".tif") and os.path.isfile(full_path):
self.original_image, self.exif_info = ImageProcessing.read_tif_image(full_path)
gray_img = cv2.cvtColor(self.original_image, cv2.COLOR_RGB2GRAY)
self.fullsize_gray_image = ImageProcessing.convert_to_float(gray_img)
elif ext.lower() in (".nef", ".arw", "cr2") and os.path.isfile(full_path):
self.original_image, self.exif_info = ImageProcessing.read_raw_image(full_path)
gray_img = cv2.cvtColor(self.original_image, cv2.COLOR_RGB2GRAY)
self.fullsize_gray_image = ImageProcessing.convert_to_float(gray_img)
else:
self.original_image = None
self.fullsize_gray_image = None
self.exif_info = None
self.reset_all()
def reset_focal_length(self):
f = self.get_exif_value("FocalLength")
if f and len(f) == 2:
self.focal_len = f[0] * 1.0 / f[1]
elif f and len(f) == 1:
self.focal_len = f[0]
else:
self.focal_len = None
def reset_all(self):
self.reset_focal_length()
self.features = {}
self.tf = None
def get_exif_value(self, name):
if not self.exif_info:
return None
info = self.exif_info[0].find(name)
if not info:
return None
else:
return info.value
class DataModel(object):
# Align options
AUTO_MASK = 1
ALIGN_STARS = 2
ALIGN_GROUND = 3
# Display options
ORIGINAL_IMAGE = 1
def __init__(self):
super(DataModel, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.images = []
self.ref_ind = 0
self.image_dir = None
self.final_sky_img = None # Of type double
self.final_ground_img = None # Of type double
# For concurrency issue
self.is_adding_image = False
# Other GUI options
self.merge_option_type = self.ALIGN_STARS
def add_image(self, path):
self.logger.debug("add_image()")
img_dir, name = os.path.split(path)
if not os.path.exists(path) or not os.path.isfile(path):
self.logger.error("File %s does not exist!", path)
return False
for img in self.images:
if path == img.full_path:
self.logger.info("Image already exists. File: %s", path)
return False
if self.is_adding_image:
return False
self.is_adding_image = True
img = Image(path)
focal_len = img.get_exif_value("FocalLength")
self.images.append(img)
self.logger.debug("Loading image %s... Focal length = %s", name, focal_len)
if not self.image_dir:
self.image_dir = img_dir
self.is_adding_image = False
return True
def update_final_sky(self, img):
self.logger.debug("update_final_sky()")
self.final_sky_num += 1
if self.final_sky_img is None and self.final_sky_num == 1:
self.final_sky_img = np.copy(img)
elif self.final_sky_img is not None and self.final_sky_num > 0:
# self.final_sky_img = np.fmax(self.final_sky_img, img)
self.final_sky_img = self.final_sky_img / self.final_sky_num * (self.final_sky_num - 1) + img / self.final_sky_num
def update_final_ground(self, img):
self.logger.debug("update_final_ground()")
self.final_ground_num += 1
if self.final_ground_img is None and self.final_ground_num == 1:
self.final_ground_img = np.copy(img)
elif self.final_ground_img is not None and self.final_ground_num > 0:
self.final_ground_img = self.final_ground_img / self.final_ground_num * (self.final_ground_num - 1) + img / self.final_ground_num
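    # Note (illustrative): the update above is an incremental mean. Writing avg_n for the
    # running average after n images,
    #     avg_n = avg_{n-1} * (n - 1) / n + img_n / n = (img_1 + ... + img_n) / n,
    # so the accumulated sky/ground image equals the plain mean of all contributed frames
    # without keeping them all in memory.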
def clear_images(self):
self.logger.debug("clear_images()")
self.images = []
self.reset_final_sky()
self.reset_final_ground()
self.image_dir = None
self.ref_ind = 0
self.is_adding_image = False
def reset_final_sky(self):
self.logger.debug("reset_final_sky()")
self.final_sky_img = None
self.final_sky_num = 0
def reset_final_ground(self):
self.logger.debug("reset_final_ground()")
self.final_ground_img = None
self.final_ground_num = 0
def reset_result(self):
self.logger.debug("reset_result()")
self.reset_final_sky()
self.reset_final_ground()
for img in self.images:
img.features = {}
def has_image(self):
res = len(self.images) > 0
self.logger.debug("has_image(): %s", res)
return res
def iter_images(self):
self.logger.debug("iter_images()")
return iter(self.images)
def total_images(self):
res = len(self.images)
self.logger.debug("total_images(): %s", res)
return res
def has_sky_result(self):
res = self.final_sky_img is not None
self.logger.debug("has_sky_result(): %s", res)
return res
def has_ground_result(self):
res = self.final_ground_img is not None
self.logger.debug("has_ground_result(): %s", res)
return res
class ImageProcessing(object):
def __init__(self):
super(ImageProcessing, self).__init__()
@staticmethod
def wavelet_dec_rec(img_blr, resize_factor=0.25):
'''
wavelet_dec_rec
Takes a picture, does a wavelet decomposition, removes the low frequency (approximation) and the highest details (noise),
and returns the recomposed picture
'''
img_shape = img_blr.shape
need_resize = abs(resize_factor - 1) > 0.001
level = int(6 - log(1 / resize_factor, 2))
if need_resize:
img_blr_resize = cv2.resize(img_blr, None, fx=resize_factor, fy=resize_factor)
else:
img_blr_resize = img_blr
coeffs = pywt.wavedec2(img_blr_resize, "db8", level=level)
#remove the low freq (approximation)
coeffs[0].fill(0)
#remove the highest details (noise??)
coeffs[-1][0].fill(0)
coeffs[-1][1].fill(0)
coeffs[-1][2].fill(0)
img_rec_resize = pywt.waverec2(coeffs, "db8")
if need_resize:
img_rec = cv2.resize(img_rec_resize, (img_shape[1], img_shape[0]))
else:
img_rec = img_rec_resize
return img_rec
@staticmethod
def detect_star_points(img_gray, mask=None, resize_length=2200):
logging.debug("detect_star_point()")
logging.debug("resize_length = %s", resize_length)
sigma = 2
img_shape = img_gray.shape
img_blr = cv2.GaussianBlur(img_gray, (9, 9), sigma)
img_blr_mean=np.mean(img_blr)
img_blr_range=np.max(img_blr) - np.min(img_blr)
img_blr = (img_blr - img_blr_mean) / img_blr_range
resize_factor = 1
while max(img_shape) * resize_factor > resize_length:
resize_factor /= 2
logging.debug("Calculating mask...")
tmp_mask = cv2.resize(img_gray, None, fx=resize_factor, fy=resize_factor)
tmp_mask_10percent=np.percentile(tmp_mask, 10)
tmp_mask = (tmp_mask < min(tmp_mask_10percent, 0.15)).astype(np.uint8) * 255
## tmp_mask = np.logical_and(tmp_mask < np.percentile(tmp_mask, 10), tmp_mask < 0.15).astype(np.uint8) * 255
logging.debug("Mask logical selection complete")
dilate_size = int(max(img_shape) * 0.02 * resize_factor * 5)
tmp_mask = 255 - cv2.dilate(tmp_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_size, dilate_size)))
tmp_mask = cv2.resize(tmp_mask, (img_shape[1], img_shape[0]))
if mask is None:
mask = tmp_mask > 127
else:
mask = np.logical_and(tmp_mask > 127, mask > 0)
logging.debug("Mask calculation Complete")
mask_rate = np.sum(mask) * 100.0 / np.prod(mask.shape)
logging.debug("mask rate: %.2f", mask_rate)
if mask_rate < 50:
mask = np.ones(tmp_mask.shape, dtype="bool")
while True:
try:
## img_rec = ImageProcessing.wavelet_dec_rec(contrast_img, resize_factor=resize_factor) * mask
img_rec = ImageProcessing.wavelet_dec_rec(img_blr, resize_factor=resize_factor) * mask
bw = ((img_rec > np.percentile(img_rec[mask], 99.5)) * mask).astype(np.uint8) * 255
bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
contours, _ = cv2.findContours(np.copy(bw), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours = [contour for contour in contours if len(contour) > 5]
logging.debug("%d star points detected", len(contours))
if len(contours) > 400:
break
else:
raise ValueError("No enough points")
except ValueError as e:
if resize_factor >= 1:
raise ValueError("Cannot detect enough star points:" + str(e))
else:
resize_factor *= 2
logging.debug("resize factor = %f", resize_factor)
#elps - match contours to an ellipse. Return a Box2D - coordinates of a rotated rectangle - 3x tuples
#first tuple is the center of the box, the second gives the width and the height and the last is the angle.
elps = [cv2.fitEllipse(contour) for contour in contours]
#centroids - the "center" of the ellipses
centroids = np.array([e[0] for e in elps])
#areas - the areas of the contours, but 0.5*len(contour)?
areas = np.array([cv2.contourArea(contour) + 0.5 * len(contour) for contour in contours])
#eccentricities - how irregular the ellipse is.
eccentricities = np.sqrt(np.array([1 - (elp[1][0] / elp[1][1]) ** 2 for elp in elps]))
# Calculate "intensity"
mask = np.zeros(bw.shape, np.uint8)
intensities = np.zeros(areas.shape)
for i in range(len(contours)):
cv2.drawContours(mask, contours[i], 0, 255, -1)
#It is a straight rectangle, it doesn't consider the rotation of the object. .
#Let (x,y) be the top-left coordinate of the rectangle and (w,h) be its width and height.
#x,y,w,h = cv2.boundingRect(cnt)
rect = cv2.boundingRect(contours[i])
val = cv2.mean(img_rec[rect[1]:rect[1] + rect[3] + 1, rect[0]:rect[0] + rect[2] + 1],
mask[rect[1]:rect[1] + rect[3] + 1, rect[0]:rect[0] + rect[2] + 1])
mask[rect[1]:rect[1] + rect[3] + 1, rect[0]:rect[0] + rect[2] + 1] = 0
intensities[i] = val[0]
# np.logical_and only combines two arrays at a time (a third argument is treated as `out`),
# so use logical_and.reduce to apply all selection criteria
valid_stars = np.logical_and.reduce((areas > 20, areas < 200, eccentricities < .8))
valid_stars = np.logical_and.reduce((valid_stars, areas > np.percentile(areas, 20), intensities > np.percentile(intensities, 20)))
star_pts = centroids[valid_stars] # [x, y]
print("Final star points = {0}".format(len(star_pts)))
areas = areas[valid_stars]
intensities = intensities[valid_stars]
return star_pts, areas * intensities
@staticmethod
def convert_to_spherical_coord(star_pts, img_size, focal_length,crop_factor=1.0):
'''
convert_to_spherical_coord
Input:
star_pts: np array of star points in x,y coordinates
img_size: image size in pixels
focal_length: focal length, the "real focal length" before crop factor. In real life no real effect observed.
crop_factor: sensor crop factor. In real life no real effect is observed.
Output: theta and phi in spherical coordinates.
'''
logging.debug("convert_coord_img_sph(Focal length {0}, crop_factor {1})".format(focal_length,crop_factor))
FullFrameSensorWidth=36 #Full frame sensor width is 36mm
sensorSize=FullFrameSensorWidth/crop_factor #Actual sensor size
PPMM = np.max(img_size)/sensorSize #Pixels per mm
p0 = (star_pts - img_size / 2.0) #Adjust start x,y coords to the middle of lens
p=p0/PPMM #Convert coordinates to mm
## p0 = (star_pts - img_size / 2.0) / (np.max(img_size) / 2)
## p = p0 * 18 # Fullframe half size, 18mm
## p = p0 * 18 / crop_factor # Fullframe half size, 18mm, with crop factor 1.5
#Now, with the origin at the center of the lens, the (x,y,z) coordinates are actually (focal_length, x, y)
#therefore, tan(theta) = x/focal_length
#ro**2 = x**2 + y**2 + focal_length **2, and
#sin(phi) = y/ro = y/sqrt(x**2 + y**2 + focal_length **2)
theta = np.arctan2(p[:, 0], focal_length)
phi = np.arcsin(p[:, 1] / np.sqrt(np.sum(p ** 2, axis=1) + focal_length ** 2))
return np.stack((theta, phi), axis=-1)
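    # Worked check (illustrative): a star exactly at the image centre has p = (0, 0), giving
    # theta = arctan2(0, f) = 0 and phi = arcsin(0) = 0; a star offset horizontally by f
    # millimetres on the sensor gives theta = arctan2(f, f) = pi/4 and phi = 0, as expected
    # for a pinhole projection with focal length f.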
@staticmethod
def extract_point_features(sph, vol, k=15):
'''
extract_point_features
Calculate the "features", or signatures of each starpoint
Identified by the angles (theta) and distances (ro) to K "neighbors"
input: sph: Spherical coordinates, theta([:0]) and phi ([:1])
vol: "Volume", i.e. the product of area and intensity/average luminosity
k: number of "neighbors"
output: Array of "features" of each star point derived from relationship between K neighbors:
theta: angle from the star point
rho: distance from the star point
vol: volume
'''
logging.debug("extract_point_features()")
pts_num = len(sph)
#convert theta,phi to x,y,z, assuming ro is 1
vec = np.stack((np.cos(sph[:, 1]) * np.cos(sph[:, 0]),
np.cos(sph[:, 1]) * np.sin(sph[:, 0]),
np.sin(sph[:, 1])), axis=-1)
# -*- coding: utf-8 -*-
import gc
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import lightgbm as lgb
from scipy import stats
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_val_score, GridSearchCV, RepeatedStratifiedKFold
from sklearn.preprocessing import StandardScaler
import os
import plotly.offline as py
import plotly.graph_objs as go
import plotly.tools as tls
import xgboost as xgb
from catboost import CatBoostClassifier
from sklearn import model_selection
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn import metrics
import json
import ast
import time
from sklearn import linear_model
import eli5
from eli5.sklearn import PermutationImportance
import shap
from tqdm import tqdm_notebook
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
from sklearn.neighbors import NearestNeighbors
from sklearn.feature_selection import GenericUnivariateSelect, SelectPercentile, SelectKBest, f_classif, mutual_info_classif, RFE
import statsmodels.api as sm
import warnings
warnings.filterwarnings('ignore')
from src.util.log_util import set_logger
logger = set_logger(__name__)
def calculate_metrics(model, X_train: pd.DataFrame() = None, y_train: pd.DataFrame() = None, X_valid: pd.DataFrame() = None,
y_valid: pd.DataFrame() = None, columns: list = []) -> pd.DataFrame():
columns = columns if len(columns) > 0 else list(X_train.columns)
train_pred = model.predict_proba(X_train[columns])
valid_pred = model.predict_proba(X_valid[columns])
f1 = 0
best_t = 0
for t in np.arange(0.1, 1, 0.05):
valid_pr = (valid_pred[:, 1] > t).astype(int)
valid_f1 = metrics.f1_score(y_valid, valid_pr)
if valid_f1 > f1:
f1 = valid_f1
best_t = t
t = best_t
train_pr = (train_pred[:, 1] > t).astype(int)
valid_pr = (valid_pred[:, 1] > t).astype(int)
train_f1 = metrics.f1_score(y_train, train_pr)
valid_f1 = metrics.f1_score(y_valid, valid_pr)
score_df = []
print(f'Best threshold: {t:.2f}. Train f1: {train_f1:.4f}. Valid f1: {valid_f1:.4f}.')
score_df.append(['F1', np.round(train_f1, 4), np.round(valid_f1, 4)])
train_r = metrics.recall_score(y_train, train_pr)
valid_r = metrics.recall_score(y_valid, valid_pr)
score_df.append(['Recall', np.round(train_r, 4), np.round(valid_r, 4)])
train_p = metrics.precision_score(y_train, train_pr)
valid_p = metrics.precision_score(y_valid, valid_pr)
score_df.append(['Precision', np.round(train_p, 4), np.round(valid_p, 4)])
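# Standalone sketch (illustrative; the helper name is hypothetical) of the threshold sweep
# used above: scan probability cut-offs and keep the one with the best validation F1.
def _best_f1_threshold(y_true, y_score):
    best_t, best_f1 = 0.0, 0.0
    for t in np.arange(0.1, 1, 0.05):
        f1 = metrics.f1_score(y_true, (y_score > t).astype(int))
        if f1 > best_f1:
            best_t, best_f1 = t, f1
    return best_t, best_f1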
import os
from pathlib import Path
import numpy as np
from nose import tools
from photomanip import PAD, CROP
from photomanip.grouper import FileSystemGrouper
from photomanip.manipulator import ImageManipulatorSKI
class TestIMKSI:
@classmethod
def setup_class(cls):
cls.fs_grouper = FileSystemGrouper('photomanip/tests/')
cls.im_ski = ImageManipulatorSKI()
cls.color_test_image = np.ones((111, 131, 3), dtype=np.bool)
cls.bw_test_image_3 = np.ones((111, 131, 1), dtype=np.bool)
cls.bw_test_image_1 = np.ones((111, 131), dtype=np.bool)
cls.ski_pad_fname = Path('photomanip/tests/year_pad_ski.jpeg')
cls.ski_crop_fname = Path('photomanip/tests/year_crop_ski.jpeg')
@classmethod
def teardown_class(cls):
os.unlink(cls.ski_pad_fname)
os.unlink(cls.ski_crop_fname)
def test_even_image(self):
# color image
test_image = self.im_ski._even_image(self.color_test_image)
tools.eq_(test_image.shape, (110, 130, 3))
# bw image
test_image = self.im_ski._even_image(self.bw_test_image_3)
tools.eq_(test_image.shape, (110, 130, 1))
test_image = self.im_ski._even_image(self.bw_test_image_1)
tools.eq_(test_image.shape, (110, 130, 1))
def test_pad_image(self):
# color image
test_image = self.im_ski._pad_image(self.color_test_image, 200)
tools.eq_(test_image.shape, (200, 200, 3))
# bw image
test_image = self.im_ski._pad_image(self.bw_test_image_3, 200)
tools.eq_(test_image.shape, (200, 200, 1))
test_image = self.im_ski._pad_image(self.bw_test_image_1, 200)
tools.eq_(test_image.shape, (200, 200, 1))
def test_square_image(self):
# color image
test_image = self.im_ski._even_image(self.color_test_image)
test_image = self.im_ski._square_image(test_image, 100)
tools.eq_(test_image.shape, (100, 100, 3))
# bw image
test_image = self.im_ski._even_image(self.bw_test_image_3)
test_image = self.im_ski._square_image(test_image, 100)
tools.eq_(test_image.shape, (100, 100, 1))
test_image = self.im_ski._even_image(self.bw_test_image_1)
test_image = self.im_ski._square_image(test_image, 100)
tools.eq_(test_image.shape, (100, 100, 1))
def test_split_scale_image(self):
# color image
test_image = self.im_ski.split_scale_image(
self.color_test_image * 3,
3
)
tools.eq_(np.any(test_image != 1), False)
# unit tests for iteration_tools.py
import pytest
import os
import netCDF4
import numpy as np
import matplotlib.pyplot as plt
import unittest.mock as mock
from pyDeltaRCM.model import DeltaModel
from . import utilities
class TestSolveWaterAndSedimentTimestep:
def test_solve_water_and_sediment_timestep_defaults(self, tmp_path):
# create a delta with default settings
p = utilities.yaml_from_dict(tmp_path, 'input.yaml')
delta = DeltaModel(input_file=p)
# mock top-level methods, verify call was made to each
delta.log_info = mock.MagicMock()
delta.init_water_iteration = mock.MagicMock()
delta.run_water_iteration = mock.MagicMock()
delta.compute_free_surface = mock.MagicMock()
delta.finalize_water_iteration = mock.MagicMock()
delta.route_sediment = mock.MagicMock()
# run the timestep
delta.solve_water_and_sediment_timestep()
# assert that methods are called
assert delta.init_water_iteration.called is True
assert delta.run_water_iteration.called is True
assert delta.compute_free_surface.called is True
assert delta.finalize_water_iteration.called is True
_calls = [mock.call(0), mock.call(1), mock.call(2)]
delta.finalize_water_iteration.assert_has_calls(
_calls, any_order=False)
assert delta.finalize_water_iteration.call_count == 3
assert (delta.route_sediment.called is True)
assert (delta._is_finalized is False)
def test_solve_water_and_sediment_timestep_itermax_10(self, tmp_path):
# create a delta with different itermax
p = utilities.yaml_from_dict(tmp_path, 'input.yaml',
{'itermax': 10})
delta = DeltaModel(input_file=p)
# mock top-level methods, verify call was made to each
delta.log_info = mock.MagicMock()
delta.init_water_iteration = mock.MagicMock()
delta.run_water_iteration = mock.MagicMock()
delta.compute_free_surface = mock.MagicMock()
delta.finalize_water_iteration = mock.MagicMock()
delta.route_sediment = mock.MagicMock()
# run the timestep
delta.solve_water_and_sediment_timestep()
# assert that methods are called
assert delta.init_water_iteration.called is True
assert delta.run_water_iteration.called is True
assert delta.compute_free_surface.called is True
assert delta.finalize_water_iteration.called is True
_calls = [mock.call(i) for i in range(10)]
delta.finalize_water_iteration.assert_has_calls(
_calls, any_order=False)
assert delta.finalize_water_iteration.call_count == 10
assert (delta.route_sediment.called is True)
assert (delta._is_finalized is False)
def test_run_one_timestep_deprecated(self, tmp_path):
# create a delta with default settings
p = utilities.yaml_from_dict(tmp_path, 'input.yaml')
_delta = DeltaModel(input_file=p)
# mock top-level methods
_delta.logger = mock.MagicMock()
_delta.solve_water_and_sediment_timestep = mock.MagicMock()
# check warning raised
with pytest.warns(UserWarning):
_delta.run_one_timestep()
# and logged
assert (_delta.logger.warning.called is True)
class TestFinalizeTimestep:
def test_finalize_timestep(self, tmp_path):
# create a delta with default settings
p = utilities.yaml_from_dict(tmp_path, 'input.yaml',
{'SLR': 0.001})
delta = DeltaModel(input_file=p)
# mock the flooding correction and log
delta.log_info = mock.MagicMock()
delta.flooding_correction = mock.MagicMock()
# run the step
delta.finalize_timestep()
# assert submethod called once
assert delta.flooding_correction.call_count == 1
# check that sea level rose as expected
assert delta.H_SL == 25
class TestApplyingSubsidence:
def test_subsidence_in_update(self, tmp_path):
# create a delta with subsidence parameters
p = utilities.yaml_from_dict(tmp_path, 'input.yaml',
{'toggle_subsidence': True,
'subsidence_rate': 1e-8,
'start_subsidence': 0,
'save_eta_grids': True,
'seed': 0})
_delta = DeltaModel(input_file=p)
# mock the timestep computations
_delta.solve_water_and_sediment_timestep = mock.MagicMock()
assert _delta.dt == 25000
assert _delta.subsidence_rate == 1e-8
assert np.all(_delta.sigma[:_delta.L0, :] == 0.0) # outside the sigma mask
assert np.all(_delta.sigma[_delta.L0:, :] == 0.00025) # inside the sigma mask
assert | np.all(_delta.eta[_delta.L0:, :] == -_delta.h0) | numpy.all |
## Portions of Code from, copyright 2018 <NAME>
from __future__ import absolute_import, division, print_function
import torch
import numpy as np
from scipy import ndimage
import png
def numpy2torch(array):
assert(isinstance(array, np.ndarray))
if array.ndim == 3:
array = np.transpose(array, (2, 0, 1))
else:
array = | np.expand_dims(array, axis=0) | numpy.expand_dims |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for slater_determinants.py."""
from __future__ import absolute_import
import numpy
import unittest
from scipy.linalg import qr
from openfermion.utils import (fermionic_gaussian_decomposition,
givens_decomposition)
from openfermion.utils._slater_determinants import (
diagonalizing_fermionic_unitary, double_givens_rotate, givens_rotate,
swap_rows)
class GivensDecompositionTest(unittest.TestCase):
def test_bad_dimensions(self):
m, n = (3, 2)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(m, m)
y = numpy.random.randn(m, m)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :n]
with self.assertRaises(ValueError):
V, givens_rotations, diagonal = givens_decomposition(Q)
def test_identity(self):
n = 3
Q = numpy.eye(n, dtype=complex)
V, givens_rotations, diagonal = givens_decomposition(Q)
# V should be the identity
I = numpy.eye(n, dtype=complex)
for i in range(n):
for j in range(n):
self.assertAlmostEqual(V[i, j], I[i, j])
# There should be no Givens rotations
self.assertEqual(givens_rotations, list())
# The diagonal should be ones
for d in diagonal:
self.assertAlmostEqual(d, 1.)
def test_antidiagonal(self):
m, n = (3, 3)
Q = numpy.zeros((m, n), dtype=complex)
Q[0, 2] = 1.
Q[1, 1] = 1.
Q[2, 0] = 1.
V, givens_rotations, diagonal = givens_decomposition(Q)
# There should be no Givens rotations
self.assertEqual(givens_rotations, list())
# VQ should equal the diagonal
VQ = V.dot(Q)
D = numpy.zeros((m, n), dtype=complex)
D[numpy.diag_indices(m)] = diagonal
for i in range(n):
for j in range(n):
self.assertAlmostEqual(VQ[i, j], D[i, j])
def test_3_by_3(self):
m, n = (3, 3)
# Obtain a random matrix of orthonormal rows
x = numpy.random.randn(n, n)
y = numpy.random.randn(n, n)
A = x + 1.j*y
Q, R = qr(A)
Q = Q[:m, :]
# Get Givens decomposition of Q
V, givens_rotations, diagonal = givens_decomposition(Q)
# There should be no Givens rotations
self.assertEqual(givens_rotations, list())
# Compute V * Q * U^\dagger
W = V.dot(Q)
# Construct the diagonal matrix
D = numpy.zeros((m, n), dtype=complex)
D[ | numpy.diag_indices(m) | numpy.diag_indices |
import RPi.GPIO as GPIO
from time import sleep
import pybullet as p
import pybullet_data
import numpy as np
import Encoder
import argparse
# Motor--------------------------------------
pwm_frequency = 1000
encoder_count_per_rotation = 810
# constants
Ts = 23.5/1000 # Nm (stall torque)
Is = 1.8 # A (stall current)
R = 8.4 # Ohm
V = 12 # Voltage [V]
noLoadCurr = 70/1000 # A
noLoadSpeed = 7000*2*np.pi/60 # rad / s
Kt = Ts/Is
Ke = (V - R*noLoadCurr)/noLoadSpeed
# V0 = R/Kt*force0 +Ke*v0
# V1 = R/Kt*force1 +Ke*v1
# V2 = R/Kt*force2 +Ke*v2
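# A minimal sketch of the commented motor model above: the drive voltage needed
# to produce a requested torque at a given angular velocity. The helper name is
# illustrative (it is not used elsewhere in this script) and relies on the
# constants R, Kt and Ke defined above.
def torque_to_voltage(torque, omega):
    """V = R/Kt * torque + Ke * omega, per the DC-motor relations noted above."""
    return R / Kt * torque + Ke * omega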
# GPIOs--------------------------------------
# First Motor related
motor_driver_0_reverse_enable_pin = 4 # GPIO 4
motor_driver_0_forward_enable_pin = 17 # GPIO 17
motor_driver_0_reverse_pwm_pin = 27 # GPIO 27
motor_driver_0_forward_pwm_pin = 22 # GPIO 22
motor_0_Encoder_A_pin = 18 # GPIO 18
motor_0_Encoder_B_pin = 23 # GPIO 23
# Second Motor related
motor_driver_1_reverse_enable_pin = 10 # GPIO 10
motor_driver_1_forward_enable_pin = 9 # GPIO 9
motor_driver_1_reverse_pwm_pin = 11 # GPIO 11
motor_driver_1_forward_pwm_pin = 5 # GPIO 5
motor_1_Encoder_A_pin = 24 # GPIO 24
motor_1_Encoder_B_pin = 25 # GPIO 25
# Third Motor related
motor_driver_2_reverse_enable_pin = 6 # GPIO 6
motor_driver_2_forward_enable_pin = 13 # GPIO 13
motor_driver_2_reverse_pwm_pin = 19 # GPIO 19
motor_driver_2_forward_pwm_pin = 26 # GPIO 26
motor_2_Encoder_A_pin = 12 # GPIO 12
motor_2_Encoder_B_pin = 16 # GPIO 16
# GPIO initialization--------------------------------------
GPIO.setmode(GPIO.BCM)
# First Motor related
GPIO.setup(motor_driver_0_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_0_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_0_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_0_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_0_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_0_Encoder_B_pin, GPIO.IN)
motor_0_encoder = Encoder.Encoder(motor_0_Encoder_A_pin, motor_0_Encoder_B_pin)
motor_driver_0_reverse_pwm = GPIO.PWM(motor_driver_0_reverse_pwm_pin, pwm_frequency)
motor_driver_0_forward_pwm = GPIO.PWM(motor_driver_0_forward_pwm_pin, pwm_frequency)
# Second Motor related
GPIO.setup(motor_driver_1_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_1_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_1_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_1_Encoder_B_pin, GPIO.IN)
motor_1_encoder = Encoder.Encoder(motor_1_Encoder_A_pin, motor_1_Encoder_B_pin)
motor_driver_1_reverse_pwm = GPIO.PWM(motor_driver_1_reverse_pwm_pin, pwm_frequency)
motor_driver_1_forward_pwm = GPIO.PWM(motor_driver_1_forward_pwm_pin, pwm_frequency)
# Third Motor related
GPIO.setup(motor_driver_2_reverse_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_forward_enable_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_reverse_pwm_pin, GPIO.OUT)
GPIO.setup(motor_driver_2_forward_pwm_pin, GPIO.OUT)
GPIO.setup(motor_2_Encoder_A_pin, GPIO.IN)
GPIO.setup(motor_2_Encoder_B_pin, GPIO.IN)
motor_2_encoder = Encoder.Encoder(motor_2_Encoder_A_pin, motor_2_Encoder_B_pin)
motor_driver_2_reverse_pwm = GPIO.PWM(motor_driver_2_reverse_pwm_pin, pwm_frequency)
motor_driver_2_forward_pwm = GPIO.PWM(motor_driver_2_forward_pwm_pin, pwm_frequency)
motor_driver_2_forward_pwm.start(0)
motor_driver_2_forward_pwm.ChangeDutyCycle(0)  # RPi.GPIO method is ChangeDutyCycle(duty); start at 0% (assumed)
# End of initialization--------------------------------------
# Argument Parsing-------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('x',
type=float,
help='1st target end effector world coordinate x')
parser.add_argument('y',
type=float,
help='1st target end effector world coordinate y')
parser.add_argument('z',
type=float,
help='1st target end effector world coordinate z')
parser.add_argument('--weight',
type = float,
help = 'set payload weight to pick up')
args = parser.parse_args()
# Argument Conversion-------------------------------------------
target = [args.x,args.y,args.z]
destination = [-args.x,args.y,args.z]
offsetJoint3 = -10*np.pi/180
tol = 1e-2
# Can alternatively pass in p.DIRECT
# client = p.connect(p.GUI)
client = p.connect(p.DIRECT)
p.setGravity(0, 0, -9.81, physicsClientId=client)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
planeId = p.loadURDF("plane.urdf")
''' LOAD '''
flags = p.URDF_USE_SELF_COLLISION
bodyId = p.loadURDF("./data/Arm_Final_Planet_hook/urdf/Arm_Final_Planet_hook.urdf",
basePosition=[0,0,0],useFixedBase=True,flags=flags)
''' LOAD '''
position, orientation = p.getBasePositionAndOrientation(bodyId)
numJoints = p.getNumJoints(bodyId)
print("number of joints = ",numJoints)
# Variable Declaration-------------------------------------------
torqueLog0 = []
torqueLog1 = []
torqueLog2 = []
errorLog0 = []
errorLog1 = []
errorLog2 = []
angLog0 = []
angLog1 = []
angLog2 = []
minE = []
prev_error0 = 0
prev_error1 = 0
prev_error2 = 0
cumul_e0 = 0
cumul_e1 = 0
cumul_e2 = 0
# Parameters-------------------------------------------
# First Motor related
kp0 = 2e-2
ki0 = 1e-8
kd0 = 2e-2
# Second Motor related
kp1 = 3e-2
ki1 = 1e-7
kd1 = 4e-2
# Third Motor related
kp2 = 2e-2
ki2 = 1e-4
kd2 = 2e-2
# prevPose = [0, 0, 0]
# prevPose1 = [0, 0, 0]
trailDuration = 15
hasPrevPose = 0
p.enableJointForceTorqueSensor(bodyId,0)
p.enableJointForceTorqueSensor(bodyId,1)
p.enableJointForceTorqueSensor(bodyId,2)
maxForce = 0
p.setJointMotorControl2(bodyId, 0,controlMode=p.VELOCITY_CONTROL, force=maxForce)
p.setJointMotorControl2(bodyId, 1,controlMode=p.VELOCITY_CONTROL, force=maxForce)
p.setJointMotorControl2(bodyId, 2,controlMode=p.VELOCITY_CONTROL, force=maxForce)
ind,Name,tp,qInd,uInd,flags,damp, \
friction,ll,ul,maxF,maxV,linkName, \
ax,parPos,parOrn,ParInd = p.getJointInfo(bodyId,0)
p.changeDynamics(bodyId,0, rollingFriction = 1e-5, spinningFriction = 1e-4)
p.changeDynamics(bodyId,1,jointLowerLimit = -np.pi,jointUpperLimit = np.pi, rollingFriction = 1e-5,spinningFriction = 1e-2)
p.changeDynamics(bodyId,2,jointLowerLimit = -np.pi,jointUpperLimit = np.pi, rollingFriction = 1e-5,spinningFriction = 1e-2)
ee_mass = p.getDynamicsInfo(bodyId,3)[0]
print("mass of end effector : ", ee_mass)
targetORN = np.asarray( p.calculateInverseKinematics(bodyId,3,target) )
destORN = np.asarray( p.calculateInverseKinematics(bodyId,3,destination) )
if(targetORN[1]>0):
targetORN[1]*= -1
print("inverting target joint 1 angle")
targetORN[0]+=np.pi
print("180 target rotating joint 0")
if(targetORN[2]>0):
print("inverting target joint 2 angle")
targetORN[2]*=-1
if(destORN[1]>0):
destORN[1]*= -1
print("inverting dest joint 1 angle")
destORN[0]+=np.pi
print("180 dest rotating joint 0")
if(destORN[2]>0):
print("inverting dest joint 2 angle")
destORN[2]*=-1
print("target pos 0 (deg.):",targetORN[0]*180/np.pi)
print("target pos 1 (deg.):",targetORN[1]*180/np.pi)
print("target pos 2 (deg.):",targetORN[2]*180/np.pi)
print("destination pos 0 (deg.):",destORN[0]*180/np.pi)
print("destination pos 1 (deg.):",destORN[1]*180/np.pi)
print("destination pos 2 (deg.):",destORN[2]*180/np.pi)
state1 = False
a1 = False
state2 = False
a2 = False
state3 = False
a3 = False
state4 = False
a4 = False
payload = args.weight
duration = 30000
p.setRealTimeSimulation(0)
# t0 = time.time()
# dt = 1/240
termTime = 0
dt = 0.05 # 50ms per loop
def main():
    # The state flags, PID accumulators and target orientation are module-level
    # variables that this function rebinds, so declare them global.
    global state1, state2, state3, state4, a1, a2, a3, a4, targetORN
    global prev_error0, prev_error1, prev_error2, cumul_e0, cumul_e1, cumul_e2
    # pos, ori = p.getBasePositionAndOrientation(bodyId)
if state1==False and a1 == False:
targetORN[2]+=offsetJoint3
a1 = True
print("Attempting at state 1 . . .")
elif state1==True and state2==False and a2==False:
targetORN[2]-=offsetJoint3
a2 = True
print("Attempting at state 2 . . .")
elif state2==True and state3==False and a3==False:
targetORN = destORN
a3 = True
print("Attempting at state 3 . . .")
elif state3==True and state4==False and a4==False:
print("Reached state 3, attempting to place the load and disengage . . .")
a4 = True
targetORN[1]-=0.5*offsetJoint3
targetORN[2]+=2*offsetJoint3
pos = [0,0,0]
pos0,vel0,RF0,torque0 = p.getJointState(bodyId,0)
pos1,vel1,RF1,torque1 = p.getJointState(bodyId,1)
pos2,vel2,RF2,torque2 = p.getJointState(bodyId,2)
''' For PID '''
error0 = targetORN[0]-pos0
error1 = targetORN[1]-pos1
error2 = targetORN[2]-pos2
# Divide this by actual step size when we control real robot.
de0 = error0 - prev_error0
de1 = error1 - prev_error1
de2 = error2 - prev_error2
cumul_e0 += error0
cumul_e1 += error1
cumul_e2 += error2
    ''' We need the joint angle and velocity for this: pos, vel '''
    # radians = (encoder value / encoder_count_per_rotation) * 2*pi
pos0 = (motor_0_encoder.read() / encoder_count_per_rotation) * 2*np.pi # rad
vel0 = pos0 / dt # rad/s
pos1 = (motor_1_encoder.read() / encoder_count_per_rotation) * 2*np.pi # rad
vel1 = pos1 / dt # rad/s
pos2 = (motor_2_encoder.read() / encoder_count_per_rotation) * 2*np.pi # rad
vel2 = pos2 / dt # rad/s
tau0,tau1,tau2 = p.calculateInverseDynamics(bodyId,
[pos0,pos1,pos2],
[vel0,vel1,vel2],
[0,0,0])
''' PID '''
T0 = kp0*(error0) + kd0*(de0/dt) + ki0*cumul_e0
T1 = kp1*(error1) + kd1*(de1/dt) + ki1*cumul_e1
T2 = kp2*(error2) + kd2*(de2/dt) + ki2*cumul_e2
prev_error0 = error0
prev_error1 = error1
prev_error2 = error2
    # Command torque = PID feedback + inverse-dynamics feedforward (gravity and
    # Coriolis compensation from calculateInverseDynamics above).
    # tau0 = 0
force0 = T0 + tau0
force1 = T1 + tau1
force2 = T2 + tau2
p.setJointMotorControl2(bodyId,0,controlMode = p.TORQUE_CONTROL, force = force0)
p.setJointMotorControl2(bodyId,1,controlMode = p.TORQUE_CONTROL, force = force1)
p.setJointMotorControl2(bodyId,2,controlMode = p.TORQUE_CONTROL, force = force2)
errorLog0.append(error0*180/np.pi)
errorLog1.append(error1*180/np.pi)
errorLog2.append(error2*180/np.pi)
torqueLog0.append(force0)
torqueLog1.append(force1)
torqueLog2.append(force2)
angLog0.append(pos0*180/np.pi)
angLog1.append(pos1*180/np.pi)
angLog2.append(pos2*180/np.pi)
minE.append(False)
if (np.abs(vel0)+np.abs(vel1)+np.abs(vel2)) <tol and \
(np.abs(error0)+ | np.abs(error1) | numpy.abs |
import pytest
import numpy
import scipy
from numpy.testing import assert_allclose
from jadapy import jdqz
from jadapy import Target
from jadapy.utils import norm
REAL_DTYPES = [numpy.float32, numpy.float64]
COMPLEX_DTYPES = [numpy.complex64, numpy.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
if dtype in COMPLEX_DTYPES:
return (numpy.random.rand(*shape) + numpy.random.rand(*shape) * 1.0j).astype(dtype)
return numpy.random.rand(*shape).astype(dtype)
def generate_test_matrix(shape, dtype):
a = generate_random_dtype_array(shape, dtype)
a += 3 * numpy.diag(numpy.ones([shape[0]], dtype))
return a
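# Note: the diagonal shift in generate_test_matrix above reduces the chance of a
# nearly singular random test matrix, which keeps the generalized eigenvalue
# comparisons in the tests below stable.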
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqz_smallest_magnitude(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
b = generate_test_matrix([n, n], dtype)
alpha, beta = jdqz.jdqz(a, b, num=k, tol=tol)
jdqz_eigs = numpy.array(sorted(alpha / beta, key=lambda x: abs(x)))
eigs = scipy.linalg.eigvals(a, b)
eigs = numpy.array(sorted(eigs, key=lambda x: abs(x)))
eigs = eigs[:k]
assert_allclose(jdqz_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqz_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqz_largest_magnitude(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
b = generate_test_matrix([n, n], dtype)
alpha, beta = jdqz.jdqz(a, b, k, Target.LargestMagnitude, tol=tol)
jdqz_eigs = numpy.array(sorted(alpha / beta, key=lambda x: -abs(x)))
eigs = scipy.linalg.eigvals(a, b)
eigs = numpy.array(sorted(eigs, key=lambda x: -abs(x)))
eigs = eigs[:k]
assert_allclose(jdqz_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqz_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqz_smallest_real(dtype):
numpy.random.seed(1234)
tol = numpy.finfo(dtype).eps * 1e3
atol = tol * 10
n = 20
k = 5
a = generate_test_matrix([n, n], dtype)
b = generate_test_matrix([n, n], dtype)
alpha, beta = jdqz.jdqz(a, b, k, Target.SmallestRealPart, tol=tol)
jdqz_eigs = numpy.array(sorted(alpha / beta, key=lambda x: x.real))
eigs = scipy.linalg.eigvals(a, b)
eigs = numpy.array(sorted(eigs, key=lambda x: x.real))
eigs = eigs[:k]
assert_allclose(jdqz_eigs.real, eigs.real, rtol=0, atol=atol)
assert_allclose(abs(jdqz_eigs.imag), abs(eigs.imag), rtol=0, atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
def test_jdqz_largest_real(dtype):
| numpy.random.seed(1234) | numpy.random.seed |
import datetime
import numpy as np
import os
from riverrunner import context
from riverrunner import settings
import time
class TContext(context.Context):
"""mock database context
generates a mock database context for unit testing
Attributes:
weather_sources ([str]): list of possible weather sources
"""
def __init__(self):
super().__init__(settings.DATABASE_TEST)
self.weather_sources = ['NOAA', 'USGS', 'SNOW']
self.measurements_file_name = "measurements_file_for_test.csv"
def clear_dependency_data(self, session):
"""clear all data from mock db
Args:
session (Session): managed connection to mock db
Returns:
None
"""
self.clear_all_tables(session)
session.query(context.Address).delete()
session.query(context.State).delete()
session.commit()
@staticmethod
def clear_all_tables(session):
"""clear tables for unittest tear down
it is important that order of entities below remain in this order.
if not, the SQL statements will not complete from foreign key
dependency issues
Args:
session (Session): managed connection to mock db
Returns:
None
"""
entities = [
context.Prediction,
context.StationRiverDistance,
context.Measurement,
context.Metric,
context.Station,
context.RiverRun
]
for entity in entities:
session.query(entity).delete()
session.commit()
def generate_addresses(self, session):
"""generate a random set of addresses
generates and inserts a random set of addresses into the db
Args:
session (Session): managed connection to mock db
Returns:
None
"""
# fill a few foreign key dependencies
session.add(context.State(
short_name='WA',
long_name='Washington'
))
addresses = [
context.Address(
latitude=self.random_latitude(),
longitude=self.random_longitude(),
address='that street you know somewhere',
city='a city %s' % i,
county='King',
state='WA',
zip='a zip'
)
for i in range(5)
]
session.add_all(addresses)
session.commit()
def get_measurements_for_test(self, i, session):
"""generate a random set of measurements
Args:
i (int): number of measurements to generate
session (Session): managed connection to mock db
Return:
[Measurement]: list containing i random Measurements
"""
stations = self.get_stations_for_test(i, session)
session.add_all(stations)
metrics = self.get_metrics_for_test(i)
session.add_all(metrics)
session.commit()
measurements = []
for idx in range(i):
measurements.append(
context.Measurement(
station_id=np.random.choice(stations, 1)[0].station_id,
metric_id=np.random.choice(metrics, 1)[0].metric_id,
date_time=datetime.datetime.now(),
value=np.round(np.random.normal(10, 3, 1)[0], 3)
))
# make sure we don't generate duplicate keys
time.sleep(.001)
return measurements
def get_measurements_file_for_test(self, i, session):
"""generate a file with a random set of measurements
Args:
i (int): number of measurements to generate
session (Session): managed connection to mock db
Return:
file: file containing i random Measurements
"""
measurements = self.get_measurements_for_test(i, session)
with open(self.measurements_file_name, "w") as f:
for measurement in measurements:
f.write("{},{},{},{}\n".format(
measurement.date_time,
measurement.metric_id,
measurement.station_id,
measurement.value
))
return self.measurements_file_name
def remove_measurements_file_for_test(self):
"""remove file used for put_measurements tests"""
os.remove(self.measurements_file_name)
@staticmethod
def get_metrics_for_test(i):
"""generate a random set of metrics
Args:
i (int): number of measurements to generate
Return:
[Metric]: list containing i random Measurements
"""
return [
context.Metric(
metric_id=mid,
description='a metric description',
name='some kind of rate of change Copy(%s)' % mid,
units='thing per second'
) for mid in range(i)]
def get_predictions_for_test(self, i, session):
"""generate a random set of predictions
Args:
i (int): number of predictions to generate
session (Session): managed connection to mock db
Return:
[Prediction]: list containing i random predictions
"""
runs = self.get_runs_for_test(i, session)
session.add_all(runs)
session.commit()
predictions = []
for idx in range(i):
fr = np.random.uniform(100, 1000, 1)[0]
sd = np.random.normal(50, 10, 1)[0]
predictions.append(
context.Prediction(
run_id=np.random.choice(runs, 1)[0].run_id,
timestamp=datetime.datetime.now(),
fr_lb=np.round(fr - sd, 1),
fr=np.round(fr, 1),
fr_ub=np.round(fr + sd, 1)
)
)
# make sure we don't generate duplicate keys
time.sleep(.001)
return predictions
@staticmethod
def get_runs_for_test(i, session):
"""generate a random set of runs
Args:
i (int): number of runs to generate
session (Session): managed connection to mock db
Returns:
[RiverRun]: list containing i random runs
"""
addresses = session.query(context.Address).all()
runs = []
for idx in range(i):
put_in = np.random.choice(addresses, 1)[0]
take_out = np.random.choice(addresses, 1)[0]
runs.append(context.RiverRun(
run_id=idx,
class_rating=np.random.choice(['I', 'II', 'IV', 'V', 'GTFO'], 1)[0],
min_level=int(np.random.randint(0, 100, 1)[0]),
max_level=int(np.random.randint(100, 1000, 1)[0]),
put_in_latitude=put_in.latitude,
put_in_longitude=put_in.longitude,
distance=np.round( | np.random.uniform(5, 3, 1) | numpy.random.uniform |
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras.layers import Conv1D, MaxPooling1D, Dense, Flatten, Lambda, Concatenate
from keras.layers import Input
from keras.models import Model
from keras.utils import plot_model, to_categorical
from keras.callbacks import EarlyStopping
from numpy.core.multiarray import ndarray
from scipy.signal import savgol_filter
from helpers.io import inputter_train, inputter_test, outputter
from helpers.preprocessing import transform_proba
stopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=2, verbose=1)
def build(_base_shape):
inputer = Input(shape=_base_shape, name='input')
split = Lambda(lambda x: tf.split(x, num_or_size_splits=3, axis=1))(inputer)
conv1 = Conv1D(filters=16, kernel_size=11, activation='relu', padding='valid', name='conv1')(split[0])
maxpool1 = MaxPooling1D()(conv1)
conv2 = Conv1D(filters=32, kernel_size=5, activation='relu', padding='valid', name='conv2')(maxpool1)
maxpool2 = MaxPooling1D()(conv2)
conv3 = Conv1D(filters=64, kernel_size=5, activation='relu', padding='valid', name='conv3')(maxpool2)
maxpool3 = MaxPooling1D()(conv3)
conv4 = Conv1D(filters=128, kernel_size=5, activation='relu', padding='valid', name='conv4')(maxpool3)
maxpool4_1 = MaxPooling1D()(conv4)
conv1 = Conv1D(filters=16, kernel_size=11, activation='relu', padding='valid', name='conv1_2')(split[1])
maxpool1 = MaxPooling1D()(conv1)
conv2 = Conv1D(filters=32, kernel_size=5, activation='relu', padding='valid', name='conv2_2')(maxpool1)
maxpool2 = MaxPooling1D()(conv2)
conv3 = Conv1D(filters=64, kernel_size=5, activation='relu', padding='valid', name='conv3_2')(maxpool2)
maxpool3 = MaxPooling1D()(conv3)
conv4 = Conv1D(filters=128, kernel_size=5, activation='relu', padding='valid', name='conv4_2')(maxpool3)
maxpool4_2 = MaxPooling1D()(conv4)
conv1 = Conv1D(filters=16, kernel_size=11, activation='relu', padding='valid', name='conv1_3')(split[1])
maxpool1 = MaxPooling1D()(conv1)
conv2 = Conv1D(filters=32, kernel_size=5, activation='relu', padding='valid', name='conv2_3')(maxpool1)
maxpool2 = MaxPooling1D()(conv2)
conv3 = Conv1D(filters=64, kernel_size=5, activation='relu', padding='valid', name='conv3_3')(maxpool2)
maxpool3 = MaxPooling1D()(conv3)
conv4 = Conv1D(filters=128, kernel_size=5, activation='relu', padding='valid', name='conv4_3')(maxpool3)
maxpool4_3 = MaxPooling1D()(conv4)
merger = Concatenate(axis=1)([maxpool4_1, maxpool4_2, maxpool4_3])
flatten = Flatten()(merger)
dense1 = Dense(1024, activation='relu', name='dense1')(flatten)
dense2 = Dense(512, activation='relu', name='dense2')(dense1)
outputer = Dense(3, activation='softmax')(dense2)
_model = Model(inputs=inputer, outputs=outputer) # type: Model
return _model
eeg1, eeg2, emg, lab = inputter_train()
print('Each data input shape: ', eeg1.shape)
data = np.concatenate((np.reshape(eeg1, (-1, 128)), np.reshape(eeg2, (-1, 128)), np.reshape(emg, (-1, 128))), axis=1)
data = data[..., np.newaxis]
print("Data format: ", data.shape)
del eeg1
del eeg2
del emg
print(lab.shape)
labels = np.reshape(lab, (-1, 1))
labels = np.concatenate((labels, labels, labels, labels), axis=1)
print(labels.shape)
labels = np.reshape(labels, (-1, 1))
labels = np.subtract(labels, 1)
labels = to_categorical(labels, num_classes=None) # type: ndarray
base_shape = (data.shape[1], data.shape[2])
print('Input shape: ', base_shape)
print('Label shape: ', labels.shape)
print('Input done.')
model = build(base_shape)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['categorical_accuracy'])
print(model.summary())
plot_model(model, to_file=os.getcwd() + '/data/' + str(time.strftime("%Y%m%d-%H%M%S")) + '_model.png', show_shapes=True,
show_layer_names=True, rankdir='TB')
print("Unique labels: ", np.unique(lab))
model.fit(data, labels, batch_size=128, epochs=50, verbose=1, validation_split=0.1,
callbacks=[stopper])
model.save_weights("/model/conv2d_model.h5")
eeg1_t, eeg2_t, emg_t = inputter_test()
data_t = np.concatenate((np.reshape(eeg1_t, (-1, 128)),
| np.reshape(eeg2_t, (-1, 128)) | numpy.reshape |
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
try:
import scipy.stats as stats
except ImportError:
pass
from .common import Benchmark
class Anderson_KSamp(Benchmark):
def setup(self, *args):
self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]
def time_anderson_ksamp(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
param_names = ['alternative']
params = [
['two-sided', 'less', 'greater']
]
def setup(self, mode):
a = | np.random.rand(2,2) | numpy.random.rand |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 15 17:34:46 2021
@author: a1
"""
import numpy as np
def zscore(A, size):
B=np.zeros(shape=[A.shape[0],size])
for j in range(size):
mean=np.mean(A[:,j])
std=np.std(A[:,j], ddof=1)
for i in range(A.shape[0]):
B[i,j] = (A[i,j] - mean) / std
return B
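# For reference, the column-wise standardisation above is equivalent to the
# vectorised call sketched below (assumes scipy is available; not used here).
# from scipy import stats
# B = stats.zscore(A[:, :size], axis=0, ddof=1)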
def dist(Z):
D=np.zeros(shape=[Z.shape[0],Z.shape[0]])
for i in range(Z.shape[0]):
point1=Z[i,:]
for j in range(Z.shape[0]):
point2=Z[j,:]
diff=point1 - point2
D[i,j] = np.sqrt(np.sum(diff*diff))
return D
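# The same pairwise Euclidean distance matrix can be computed without the double
# loop; a sketch assuming scipy is available (not used below):
# from scipy.spatial.distance import cdist
# D = cdist(Z, Z)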
def ROI_Main(I,Z):
    d=dist(Z) #Euclidean distance matrix between stations
Z=np.transpose(Z)
Y=np.transpose(np.percentile(d, [35, 50, 85], axis=1))
nin=d.shape[0]
NST=nin/2 #stations number divided by 2
d2=np.zeros(shape=[d.shape[0],d.shape[1]])
S =np.zeros(shape=[d.shape[0],d.shape[1]])
Nsi=np.zeros(shape=[d.shape[0],d.shape[1]])
D=np.zeros(shape=[d.shape[0],d.shape[1]])
S2=np.zeros(shape=[d.shape[0],d.shape[1]])
Ns=np.zeros(shape=[d.shape[0],d.shape[1]])
M=np.zeros(shape=[d.shape[0],d.shape[1]])
S_all=np.zeros(shape=[Z.shape[0],Z.shape[1]])
#Ss=np.zeros(shape=[Z.shape[0],Z.shape[1]])
for k in range(d.shape[0]):
d2[k,:]=d[k, d[k, :].argsort()]
for i in range(50):
        S[i,:]=d2[i,:]<=Y[i,0] #stations within the 35th-percentile distance
        Nsi[i,:]=np.sum(S[i,:])-1 #count of such stations, excluding the station itself
D[i,:]=Y[i,0]+((Y[i,2]-Y[i,0])*((NST-Nsi[i,:])/(NST))) #main Eqn. to get the optimum distance
S2[i,:]=d2[i,:]<=D[i,:]
Ns[i,:]=sum(S2[i,:])-1 #Number of stations in ROI
M[i,:]=d[i,:]<=D[i,:] #stations by their number in ROI
imax=0
for j in range(50):
if M[I,j] == 1:
S_all[:,j]=Z[:,j]
imax=j
Ss=S_all[:,0:imax+1]
pin=imax
#Index for the number of cases
row=np.zeros(shape=[1,pin+1])
row[0,:]=range(1,(pin+2))
Ss_ser=np.append(row, Ss, axis=0)
idx = np.argwhere( | np.any(Ss_ser[..., :] == 0, axis=0) | numpy.any |
import numpy as np
import torch
import os
import glob
from utils.dataset_processing import image, grasp
from .grasp_data import GraspDatasetBase
class CameraData(GraspDatasetBase):
"""
Dataset wrapper for the camera data.
"""
def __init__(self, file_path, ds_rotate=0,
width=640,
height=480,
output_size=224,
include_depth=True,
include_rgb=True,
**kwargs):
"""
:param output_size: Image output size in pixels (square)
:param include_depth: Whether depth image is included
:param include_rgb: Whether RGB image is included
"""
super(CameraData, self).__init__(**kwargs)
self.output_size = output_size
self.include_depth = include_depth
self.include_rgb = include_rgb
self.depth_files = glob.glob(os.path.join(file_path, 'depth_*.npy'))
self.depth_files.sort()
self.rgb_files = glob.glob(os.path.join(file_path, 'color_*.png'))
self.rgb_files.sort()
self.length = len(self.depth_files)
if include_depth is False and include_rgb is False:
raise ValueError('At least one of Depth or RGB must be specified.')
left = (width - output_size) // 2
top = (height - output_size) // 2
right = (width + output_size) // 2
bottom = (height + output_size) // 2
self.bottom_right = (bottom, right)
self.top_left = (top, left)
@staticmethod
def numpy_to_torch(s):
if len(s.shape) == 2:
return torch.from_numpy(np.expand_dims(s, 0).astype(np.float32))
else:
return torch.from_numpy(s.astype(np.float32))
def get_gtbb(self, idx, rot=0, zoom=1.0):
rect = np.array([[
[0.0, 10.0],
[10.0, 10.0],
[10.0, 0.0],
[0.0, 0.0]
]])
gtbbs = grasp.GraspRectangles.load_from_array(rect)
c = self.output_size // 2
# gtbbs.rotate(rot, (c, c))
# gtbbs.zoom(zoom, (c, c))
return gtbbs
def get_depth(self, idx, rot=0, zoom=1.0, normalise=True):
arr = np.load(self.depth_files[idx])
depth_img = image.Image(arr)
depth_img.crop(bottom_right=self.bottom_right, top_left=self.top_left)
depth_img.rotate(rot)
# depth_img.zoom(zoom)
depth_img.resize((self.output_size, self.output_size))
# depth_img.resize((self.output_size, self.output_size))
# depth_img.img = depth_img.img.transpose((2, 0, 1))
if normalise:
depth_img.normalise()
return np.squeeze(depth_img.img)
def get_rgb(self, idx, rot=0, zoom=1.0, normalise=True):
rgb_img = image.Image.from_file(self.rgb_files[idx])
rgb_img.crop(bottom_right=self.bottom_right, top_left=self.top_left)
rgb_img.rotate(rot)
rgb_img.zoom(zoom)
rgb_img.resize((self.output_size, self.output_size))
if normalise:
rgb_img.normalise()
rgb_img.img = rgb_img.img.transpose((2, 0, 1))
return rgb_img.img
def get_data(self, rgb=None, depth=None):
depth_img = None
rgb_img = None
# Load the depth image
if self.include_depth:
depth_img = self.get_depth(img=depth)
# Load the RGB image
if self.include_rgb:
rgb_img = self.get_rgb(img=rgb)
if self.include_depth and self.include_rgb:
x = self.numpy_to_torch(
np.concatenate(
( | np.expand_dims(depth_img, 0) | numpy.expand_dims |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test numpy ops """
import numpy as np
import mindspore.numpy as mnp
from mindspore import Tensor
from mindspore.nn import Cell
import mindspore.context as context
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
context.set_context(mode=context.GRAPH_MODE)
class MeshGrid(Cell):
def construct(self, a, b, c, d):
ret = mnp.meshgrid(a, b, c, d)
return ret
class Choose(Cell):
def construct(self, a, b):
ret = mnp.choose(a, b)
return ret
class Histogram(Cell):
def construct(self, a):
ret = mnp.histogram(a)
return ret
class Norm(Cell):
def construct(self, a):
ret = mnp.norm(a)
return ret
class Cross(Cell):
def construct(self, a, b):
ret = mnp.cross(a, b)
return ret
class Stack(Cell):
def construct(self, a, b):
ret = mnp.stack((a, b))
return ret
class Correlate(Cell):
def construct(self, a, b):
ret = mnp.correlate(a, b)
return ret
class Split(Cell):
def construct(self, tensor):
a = mnp.split(tensor, indices_or_sections=1)
b = mnp.split(tensor, indices_or_sections=3)
c = mnp.array_split(tensor, indices_or_sections=1)
d = mnp.array_split(tensor, indices_or_sections=3, axis=-1)
return a, b, c, d
class MatrixPower(Cell):
def construct(self, tensor):
a = mnp.matrix_power(tensor, 3)
return a
class RavelMultiIndex(Cell):
def construct(self, tensor):
a = mnp.ravel_multi_index(tensor, (7, 6))
b = mnp.ravel_multi_index(tensor, (7, 6), order='F')
c = mnp.ravel_multi_index(tensor, (4, 6), mode='clip')
d = mnp.ravel_multi_index(tensor, (4, 4), mode='wrap')
return a, b, c, d
class GeomSpace(Cell):
def construct(self, start):
a = mnp.geomspace(1, 256, num=9)
b = mnp.geomspace(1, 256, num=8, endpoint=False)
c = mnp.geomspace(start, [1000, 2000, 3000], num=4)
d = mnp.geomspace(start, [1000, 2000, 3000], num=4, endpoint=False, axis=-1)
return a, b, c, d
class Arange(Cell):
def construct(self):
a = mnp.arange(10)
b = mnp.arange(0, 10)
c = mnp.arange(0.1, 9.9)
return a, b, c
class Eye(Cell):
def construct(self):
res = []
for n in range(1, 5):
for k in range(0, 5):
res.append(mnp.eye(10, n, k))
return res
class Trace(Cell):
def construct(self, arr):
a = mnp.trace(arr, offset=-1, axis1=0, axis2=1)
b = mnp.trace(arr, offset=0, axis1=1, axis2=0)
return a, b
class Pad(Cell):
def construct(self, arr1, arr2):
a = mnp.pad(arr1, ((1, 1), (2, 2), (3, 4)))
b = mnp.pad(arr1, ((1, 1), (2, 2), (3, 4)), mode="mean", stat_length=((1, 2), (2, 10), (3, 4)))
c = mnp.pad(arr1, ((1, 1), (2, 2), (3, 4)), mode="edge")
d = mnp.pad(arr1, ((1, 1), (2, 2), (3, 4)), mode="wrap")
e = mnp.pad(arr1, ((1, 3), (5, 2), (3, 0)), mode="linear_ramp", end_values=((0, 10), (9, 1), (-10, 99)))
f = mnp.pad(arr2, ((10, 13), (5, 12), (3, 0), (2, 6)), mode='symmetric', reflect_type='even')
g = mnp.pad(arr2, ((10, 13)), mode='reflect', reflect_type='even')
return a, b, c, d, e, f, g
class Where(Cell):
def construct(self, a, b, c):
ret = mnp.where(a, b, c)
return ret
class Select(Cell):
def construct(self, a, b):
ret = mnp.select(a, b)
return ret
class IsClose(Cell):
def construct(self, a, b):
ret = mnp.isclose(a, b)
return ret
class Average(Cell):
def construct(self, a):
ret = mnp.average(a)
return ret
class Remainder(Cell):
def construct(self, a, b):
ret = mnp.remainder(a, b)
return ret
class Diff(Cell):
def construct(self, a):
ret1 = mnp.diff(a)
ret2 = mnp.ediff1d(a)
return ret1, ret2
class Trapz(Cell):
def construct(self, arr):
a = mnp.trapz(arr, x=[-2, 1, 2], axis=1)
b = mnp.trapz(arr, dx=3, axis=0)
return a, b
class Lcm(Cell):
def construct(self, a, b):
ret = mnp.lcm(a, b)
return ret
class Cov(Cell):
def construct(self, a):
ret = mnp.cov(a, a)
return ret
class Gradient(Cell):
def construct(self, a):
ret = mnp.gradient(a)
return ret
class MultiDot(Cell):
def construct(self, a, b, c, d):
ret = mnp.multi_dot((a, b, c, d))
return ret
class Histogramdd(Cell):
def construct(self, a):
ret = mnp.histogramdd(a)
return ret
test_cases = [
('MeshGrid', {
'block': MeshGrid(),
'desc_inputs': [Tensor(np.full(3, 2, dtype=np.float32)),
Tensor(np.full(1, 5, dtype=np.float32)),
Tensor(np.full((2, 3), 9, dtype=np.float32)),
Tensor(np.full((4, 5, 6), 7, dtype=np.float32))],
}),
('Norm', {
'block': Norm(),
'desc_inputs': [Tensor(np.ones((5, 2, 3, 7), dtype=np.float32))],
}),
('Cross', {
'block': Cross(),
'desc_inputs': [Tensor(np.arange(18, dtype=np.int32).reshape(2, 3, 1, 3)),
Tensor(np.arange(9, dtype=np.int32).reshape(1, 3, 3))],
}),
('Stack', {
'block': Stack(),
'desc_inputs': [Tensor(np.arange(9, dtype=np.int32).reshape(3, 3)),
Tensor(np.arange(9, dtype=np.int32).reshape(3, 3)),],
}),
('Correlate', {
'block': Correlate(),
'desc_inputs': [Tensor(np.array([1, 2, 3, 4, 5], dtype=np.int32)),
Tensor(np.array([0, 1], dtype=np.int32)),],
}),
('Split', {
'block': Split(),
'desc_inputs': [Tensor(np.arange(9, dtype=np.float32).reshape(3, 3))],
}),
('MatrixPower', {
'block': MatrixPower(),
'desc_inputs': [Tensor(np.arange(9, dtype=np.float32).reshape(3, 3))],
}),
('RavelMultiIndex', {
'block': RavelMultiIndex(),
'desc_inputs': [Tensor(np.array([[3, 6, 6], [4, 5, 1]], dtype=np.int32))],
}),
('GeomSpace', {
'block': GeomSpace(),
'desc_inputs': [Tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))],
}),
('Arange', {
'block': Arange(),
'desc_inputs': [],
}),
('Eye', {
'block': Eye(),
'desc_inputs': [],
}),
('Trace', {
'block': Trace(),
'desc_inputs': [Tensor(np.ones((3, 5), dtype=np.float32))],
}),
('Where', {
'block': Where(),
'desc_inputs': [Tensor(np.full((1, 1, 2), [False, True])),
Tensor(np.full((1, 3, 2), 5, dtype=np.float32)),
Tensor(np.full((2, 1, 1), 7, dtype=np.float32))],
}),
('Select', {
'block': Select(),
'desc_inputs': [Tensor([[True, True, True, False, False], [False, False, True, False, True]]),
Tensor(np.array([[0, 1, 2, 3, 4], [0, 1, 4, 9, 16]], dtype=np.int32))],
}),
('IsClose', {
'block': IsClose(),
'desc_inputs': [Tensor(np.array([0, 1, 2, float('inf'), float('inf'), float('nan')], dtype=np.float32)),
Tensor(np.array([0, 1, -2, float('-inf'), float('inf'), float('nan')], dtype=np.float32))],
}),
('Average', {
'block': Average(),
'desc_inputs': [Tensor(np.array([[1., 2.], [3., 4.]], dtype=np.float32))],
}),
('Remainder', {
'block': Remainder(),
'desc_inputs': [Tensor(np.array([4, 7], dtype=np.int32)),
Tensor(np.array([[1, 2], [3, 4]], dtype=np.int32))],
}),
('Diff', {
'block': Diff(),
'desc_inputs': [Tensor( | np.array([1, 3, -1, 0, 4], dtype=np.int32) | numpy.array |
#!/usr/bin/env python
#
# Heart Rate and Electrocardiagram Simulation
# CSE 6730 Modeling and Simulation Project #2
# <NAME> and <NAME>
# Georgia Institute of Technology
# May 2016
#
# Imports -------------------------------------------------------------------- #
import sys
import time
import Queue
import numpy as np
import ConfigParser
from uuid import uuid4
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# Constants ------------------------------------------------------------------ #
ECG_AMP_MAX = +1.2 # Maximum ECG signal amplitude (mV)
ECG_AMP_MIN = -0.4 # Minimum ECG signal amplitude (mV)
ECG_NOISE_AMP = .2 # ECG Noise signal max amplitude (mV)
START_ACTIVITY = 1 # Start activity event flag
END_ACTIVITY = -1 # End activity event flag
# Classes -------------------------------------------------------------------- #
class Human(object):
"""Store characteristics of the human whose heart will be simulated."""
def __init__(self, verbose=False):
"""Create a human object with all default parameters."""
# Attributes to set once during creation of the object.
self.age = 45
self.gender = "male"
self.mass = 80
self.rhr = 60
self.vo2max = 11.0
# State variables that change throughout the simulation
self.hr = 60
self.cav = 7
self.intensity = 1
self.activity = None
# Control variables
self.verbose = verbose
def description(self):
"""A simple text description of the object."""
template = "The human subject is {} age {}"
return template.format(self.gender, self.age)
def change_activity(self, activity):
self.activity = activity
self._adjust_activity_level(activity.met)
def rest(self):
self.activity = None
self._adjust_activity_level(1.0)
def _adjust_activity_level(self, intensity):
"""Mutate the human object for the new activity level.
:param intensity: Activity intensity in units of METs.
"""
self.intensity = intensity
sv = self._calculate_stroke_volume()
self.cav = self._calculate_cav(intensity)
self.hr = 3.5*self.mass*intensity/sv/(self.cav/100)
if self.verbose:
print("HR = {} beats/min".format(self.hr))
def _initial_stroke_volume(self):
"""Calculate the human's resting stroke volume."""
sv = 3.5*self.mass/self.rhr/(self._calculate_cav(1.0)/100)
if self.verbose:
print("Initial SV = {} ml".format(sv))
return sv # mL
def _calculate_stroke_volume(self):
"""Calculate updated stroke volume.
This uses a linear approximation.
:return: Stroke volume (ml) scaled for activity level.
"""
max_increase = 0.65 # 65% increase at max
sv_init = self._initial_stroke_volume()
if self.intensity/self.vo2max >= 0.6:
# After 60% VO2max the SV has plateaued
sv = sv_init*(1 + max_increase)
elif self.intensity > 1:
# Model as linear increase up to plateau
sv = sv_init*(max_increase*(self.intensity - 1)/(0.6*self.vo2max - 1) + 1)
else:
# Keep resting SV
sv = sv_init
if self.verbose:
print("Scaled SV = {} ml".format(sv))
return sv
def _calculate_cav(self, intensity):
"""Calculate arteriovenous oxygen content difference (Cav).
:param intensity: Exercise intensity in units of METs.
"""
cav = 5.72 + 0.1047*(intensity/self.vo2max*100)
if self.verbose:
print("Cav = {} ml/100ml".format(cav))
return cav
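# Usage sketch (illustrative numbers only). The heart-rate update above follows the
# Fick principle: VO2 = HR * SV * (Cav/100), with VO2 = 3.5 ml/kg/min * mass * METs,
# hence HR = 3.5 * mass * METs / SV / (Cav / 100).
# subject = Human(verbose=True)
# subject._adjust_activity_level(6.0)   # roughly a 6 MET activity
# print(subject.hr)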
class Activity(object):
"""Represents an activity that will change the average heart rate."""
def __init__(self, uid):
"""Create an activity object with all default values."""
self.type = "resting"
self.met = 1
self.start = 0
self.duration = 60
self.uid = uid
def description(self):
"""A simple text description of the object."""
template = "Perform {} activity for {} minutes"
return template.format(self.type, self.duration)
class Event(object):
""" """
def __init__(self, event_id, event_obj):
self.id = event_id
self.obj = event_obj
class HeartSimulation(object):
"""Simulate average HR based on activity level"""
def __init__(self, human, fel, verbose=False, visual=False):
self.human = human
self.fel = fel
self.avg_hr = []
self.verbose = verbose
self.visual = visual
def start_activity_event(self, time_stamp, event):
"""Start activity event handler.
:param time_stamp: Time stamp of the activity start.
:param event: Event object containing event information.
"""
activity = event.obj
if self.verbose:
if self.human.activity is not None:
print("End activity {} at time {}".format(self.human.activity.type, time_stamp))
print("\nStart activity {} at time {}".format(activity.type, time_stamp))
# Queue an event that will end the activity
new_event = Event(END_ACTIVITY, activity)
self.fel.put((time_stamp + activity.duration, new_event))
old_hr = self.human.hr
self.human.change_activity(activity)
if self.verbose:
print("delta HR = {}".format(self.human.hr - old_hr))
# Save the change in HR
self.avg_hr.append((time_stamp, self.human.hr, activity.met))
def end_activity_event(self, time_stamp, event):
"""End activity event handler.
:param time_stamp: Time stamp of the activity start.
:param event: Event object containing event information.
"""
activity = event.obj
# Check to see if the activity is still in progress.
if activity.uid == self.human.activity.uid:
if self.verbose:
print("End activity {} at time {}".format(activity.type, time_stamp))
old_hr = self.human.hr
self.human.rest() # Put the heart back at rest
if self.verbose:
print("delta HR = {}".format(self.human.hr - old_hr))
# Save the change in HR
self.avg_hr.append((time_stamp, self.human.hr, 1.0))
def run_simulation(self, output_file_path=None):
"""Run the discrete event heart rate simulation."""
# Process queued events
while not self.fel.empty():
# Get the next event with lowest time stamp value
now, event = self.fel.get()
# Call event handlers
if event.id == START_ACTIVITY:
self.start_activity_event(now, event)
elif event.id == END_ACTIVITY:
self.end_activity_event(now, event)
# Process the HR data to include transitions.
# Approximate transitions as 2 min linear transitions discretely stepped every 10 seconds.
# Assume the HR starts at rest
temp_events = []
t_step = 1./6
prev_hr = self.human.rhr
for n in range(len(self.avg_hr)):
t, hr, met = self.avg_hr[n]
if hr != prev_hr:
end_t = t + 2
if len(self.avg_hr) - 1 > n:
# check the next one
next_t = self.avg_hr[n + 1][0]
if next_t < end_t:
end_t = next_t
# Add transition steps
t_steps = np.arange(t, end_t + t_step, t_step)
hr_steps = np.linspace(prev_hr, hr, num=len(t_steps))
temp_events.extend([(ts, hr_steps[i], met) for i, ts in enumerate(t_steps)])
prev_hr = hr
# Write the HR data to the output file
self.avg_hr = temp_events
if output_file_path is not None:
with open(output_file_path, 'w') as hr_output_file:
for t, hr, met in self.avg_hr:
hr_output_file.write("{},{},{}\n".format(t, hr, met))
# If the visual flag is set plot the results.
if self.visual:
data = np.array(self.avg_hr)
plt.figure()
plt.plot(data[:, 0], data[:, 1])
plt.xlabel("Time (min)")
plt.ylabel("Average HR")
plt.grid(True)
plt.show()
return 0 # return status
class ECGSimulation(object):
"""Simulate realistic ECG signals based on inputs."""
def __init__(self, hr_list, visual=False):
self.hr_vector = hr_list
self.ecg_state = None
self.x0 = np.array([1, 0, 0.04])
self.visual = visual
def synthetic_ecg(self, hr_mean=60.0, no_amp=0.0, start_t=0, stop_t=10):
"""
:param hr_mean: Mean heart rate
:param no_amp: Noise amplitude
:param start_t: Signal start time in seconds
:param stop_t: signal stop time in seconds
:return: ECG signal array and corresponding time (sec) array
"""
# Settings ----------------------------------------------------------- #
Fs = 100.0 # sampling frequency (samples/sec)
sfecg = Fs # ECG sampling frequency [ Hertz]
sfint = Fs # ECG sampling frequency [ Hertz]
hr_std = 1.0 # Standard deviation of heart rate [1 beat per minute]
lfhfratio = 0.5 # LF/HF ratio [0.5]
        ti = np.radians([-60, -15, 0, 15, 90])  # P Q R S T: angles of the extrema, given in degrees and converted to radians
ai = np.array([1.2, -5, 30, -7.5, 0.75]) # ai = z-position of extrema [1.2 -5 30 -7.5 0.75]
bi = np.array([0.25, 0.1, 0.1, 0.1, 0.4]) # bi = Gaussian width of peaks [0.25 0.1 0.1 0.1 0.4]
        np.random.seed(9843)  # Seed the RNG (the original called an undefined min_rand; numpy seeding assumed)
# -------------------------------------------------------------------- #
n = int(stop_t - start_t) # time in sec
# Adjust extrema parameters for mean heart rate
hrfact = np.sqrt(hr_mean/60.0)
hrfact2 = np.sqrt(hrfact)
bi = hrfact*bi
ti = np.array([hrfact2, hrfact, 1., hrfact, hrfact2])*ti
q = np.round(sfint/sfecg)
# frequency parameters for rr process flo and fhi are the Mayer waves
# and respiratory rate respectively
flo = 0.1
fhi = 0.25
flo_std = 0.01
fhi_std = 0.01
# Compute the RR-interval which is the time between successive R-peaks,
# the inverse of this time interval gives the instantaneous heart rate.
sfrr = 1.0 # sampling frequency
w1 = 2*np.pi*flo
w2 = 2*np.pi*fhi
c1 = 2*np.pi*flo_std
c2 = 2*np.pi*fhi_std
sig2 = 1.
sig1 = lfhfratio
rr_mean = 60./hr_mean
rr_std = 60.*hr_std/(hr_mean*hr_mean)
df = sfrr/n
w = np.arange(n)*2*np.pi*df
dw1 = w - w1
dw2 = w - w2
hw1 = sig1* | np.exp(-0.5*(dw1/c1)**2) | numpy.exp |
import numpy as np
import gym
from gym import spaces
# from attn_toy.env.fourrooms import Fourrooms as Fourrooms
from attn_toy.env.fourrooms import FourroomsNorender as Fourrooms
class ImageInputWarpper(gym.Wrapper):
def __init__(self, env, max_steps=100):
gym.Wrapper.__init__(self, env)
screen_height = self.env.obs_height
screen_width = self.env.obs_width
self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)
# self.num_steps = 0
self.max_steps = max_steps
# self.state_space_capacity = self.env.state_space_capacity
self.mean_obs = None
def step(self, action):
state, reward, done, info = self.env.step(action)
# self.num_steps += 1
if self.num_steps >= self.max_steps:
done = True
obs = self.env.render(state)
# print("step reporting",done)
# if self.mean_obs is None:
# self.mean_obs = np.mean(obs)
# print("what is wrong?",self.mean_obs)
# obs = obs - 0.5871700112336601
# info['ori_obs'] = ori_obs
info['s_tp1'] = state
return obs, reward, done, info
def reset(self, state=-1):
if state < 0:
state = np.random.randint(0, self.state_space_capacity)
self.env.reset(state)
# self.num_steps = self.env.num_steps
obs = self.env.render(state)
# print("reset reporting")
# if self.mean_obs is None:
# self.mean_obs = np.mean(obs)
# print("what is wrong? reset",self.mean_obs)
# obs = obs - 0.5871700112336601
# info['ori_obs'] = ori_obs
return obs.astype(np.uint8)
class FourroomsDynamicNoise(Fourrooms): # noise type = dynamic relevant
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsDynamicNoise, self).__init__(max_epilen, goal)
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
self.background = np.random.randint(0, 255, (10, 1, 1, 3))
self.background[:, :, :, 2] = 0
self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
self.color[:, 2] = 100
self.observation_space = spaces.Discrete(self.num_pos * 3)
self.state_space_capacity = self.observation_space.n
def render(self, state=-1):
which_background = state // self.num_pos
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsDynamicNoise, self).render(state)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
def step(self, action):
state, reward, done, info = super(FourroomsDynamicNoise, self).step(action)
state += self.num_pos * (self.num_steps % 3)
return state, reward, done, info
def reset(self, state=-1):
obs = super(FourroomsDynamicNoise, self).reset(state % self.num_pos)
self.num_steps = state % 3
return state
class FourroomsDynamicNoise2(Fourrooms): # noise type = state relevant
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsDynamicNoise2, self).__init__(max_epilen, goal)
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
self.background = np.random.randint(0, 255, (10, 1, 1, 3))
self.background[:, :, :, 2] = 0
self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
self.color[:, 2] = 100
self.observation_space = spaces.Discrete(self.num_pos * max_epilen)
self.state_space_capacity = self.num_pos * max_epilen
self.last_action = -1
def step(self, action):
state, reward, done, info = super(FourroomsDynamicNoise2, self).step(action)
state += self.num_pos * self.num_steps
return state, reward, done, info
def reset(self, state=-1):
self.state = state
obs = super(FourroomsDynamicNoise2, self).reset(state % self.num_pos)
self.num_steps = state // self.num_pos
return state
def render(self, state=-1):
# which_background = self.num_steps % 3
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
obs = np.tile(self.color[self.num_steps + 1][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
# obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsDynamicNoise2, self).render(state % self.num_pos)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
class FourroomsDynamicNoise3(Fourrooms): # noise type = action relevant
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsDynamicNoise3, self).__init__(max_epilen, goal)
self.agent_color = np.tile(np.array([[1, 0, 0]]), (100, 1))
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
self.background = np.random.randint(0, 255, (10, 1, 1, 3))
self.background[:, :, :, 2] = 0
self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
self.color[:, 2] = 100
self.observation_space = spaces.Discrete(self.num_pos * self.action_space.n)
self.state_space_capacity = self.observation_space.n
def render(self, state=-1):
which_background = state // self.num_pos
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
# print(which_background, self.color[which_background])
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsDynamicNoise3, self).render(state)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
def step(self, action):
state, reward, done, info = super(FourroomsDynamicNoise3, self).step(action)
state += self.num_pos * action
# print("state in step",state)
return state, reward, done, info
def reset(self, state=-1):
obs = super(FourroomsDynamicNoise3, self).reset(state % self.num_pos)
self.num_steps = state // self.num_pos
return state
class FourroomsRandomNoise(Fourrooms): # noise type = random
def __init__(self, max_epilen=100, obs_size=128, seed=0, goal=77):
np.random.seed(seed)
super(FourroomsRandomNoise, self).__init__(max_epilen, goal)
self.obs_size = obs_size
self.obs_height = obs_size
self.obs_width = obs_size
# self.background = np.random.randint(0, 255, (10, 1, 1, 3))
# self.background[:, :, :, 2] = 0
# self.background = np.tile(self.background, (1, obs_size, obs_size, 1))
self.random_background = np.random.randint(0, 255, (100, obs_size, obs_size, 3))
# self.random_background[..., 2] = 100
self.seed = seed
self.color = np.random.randint(100, 255, (200, 3))
# self.color[:, 2] = 100
self.rand_range = 100
self.observation_space = spaces.Discrete(self.num_pos * self.rand_range)
self.state_space_capacity = self.observation_space.n
self.which_background = -1
def render(self, state=-1):
which_background = state // self.num_pos
# obs = np.zeros((self.obs_size, self.obs_size, 3))
# obs[:12, :12, :] = self.color[state + 1]
# obs = np.random.randint(0, 255, (self.obs_size, self.obs_size, 3))
# obs = np.tile(self.color[which_background][np.newaxis, np.newaxis, :], (self.obs_size, self.obs_size, 1))
obs = self.random_background[which_background]
# obs = (state+100) * np.ones((self.obs_size,self.obs_size))
arr = super(FourroomsRandomNoise, self).render(state % self.num_pos)
padding_height, padding_width = (obs.shape[0] - arr.shape[0]) // 2, (obs.shape[1] - arr.shape[1]) // 2
obs[padding_height:padding_height + arr.shape[0], padding_width:padding_width + arr.shape[1], :] = arr
return obs.astype(np.uint8)
def step(self, action):
state, reward, done, info = super(FourroomsRandomNoise, self).step(action)
self.which_background = | np.random.randint(0, self.rand_range) | numpy.random.randint |